bitkeeper revision 1.1506 (428f8748oAPuAqxeI4b_UUMZQok4QQ)
author kaf24@viper.(none) <kaf24@viper.(none)>
Sat, 21 May 2005 19:08:56 +0000 (19:08 +0000)
committer kaf24@viper.(none) <kaf24@viper.(none)>
Sat, 21 May 2005 19:08:56 +0000 (19:08 +0000)
Checksum offload for local virtual networking, and to/from a physical
interface that may be connected via a virtual bridge or router. This adds
a couple of new fields to skbuffs that are intended to survive across IP-
or MAC-level forwarding. I've tested basic connectivity with this patch,
but further stress testing and performance benchmarking are still required.
Signed-off-by: Keir Fraser <keir@xensource.com>
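
For orientation, here is a minimal summary of the new per-skb state
introduced by this changeset (an illustrative comment block, not part of
the diff itself; the field names come from the skbuff.h hunk below):

    /*
     * proto_csum_valid: the protocol checksum has already been verified
     *                   (or generated) within this host, so receivers on
     *                   a purely local path need not re-check it.
     * proto_csum_blank: the protocol checksum field was left blank and
     *                   must be filled in before the packet leaves via a
     *                   real device or reaches a checking consumer.
     */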
.rootkeys
linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
linux-2.6.11-xen-sparse/drivers/xen/netback/loopback.c
linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
linux-2.6.11-xen-sparse/include/linux/skbuff.h [new file with mode: 0644]
linux-2.6.11-xen-sparse/net/core/dev.c [new file with mode: 0644]
linux-2.6.11-xen-sparse/net/core/skbuff.c [new file with mode: 0644]
xen/include/public/io/netif.h

index af919b340794b61e813bcec1477eca849c4d581c..86ebc2f7793bbc6775c1781dc48b905be74f18db 100644
--- a/.rootkeys
+++ b/.rootkeys
 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.11-xen-sparse/include/linux/gfp.h
 42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.11-xen-sparse/include/linux/highmem.h
 419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.11-xen-sparse/include/linux/irq.h
+428f8747dtEZ4CfC5tb6Loe9h0Ivpg linux-2.6.11-xen-sparse/include/linux/skbuff.h
 419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.11-xen-sparse/kernel/irq/manage.c
 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.11-xen-sparse/mkbuildtree
 42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.11-xen-sparse/mm/highmem.c
 412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.11-xen-sparse/mm/memory.c
 426fa4d7ooLYmFcFjJMF_ut4GFVh2Q linux-2.6.11-xen-sparse/mm/mmap.c
 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.11-xen-sparse/mm/page_alloc.c
+428f8747Gp_X2UtgwcL0-YeYkCXxvQ linux-2.6.11-xen-sparse/net/core/dev.c
+428f8747vBdkOrip6rhWK_eEvVc8dA linux-2.6.11-xen-sparse/net/core/skbuff.c
 413cb1e4zst25MDYjg63Y-NGC5_pLg netbsd-2.0-xen-sparse/Makefile
 413cb1e5c_Mkxf_X0zimEhTKI_l4DA netbsd-2.0-xen-sparse/mkbuildtree
 413cb1e5kY_Zil7-b0kI6hvCIxBEYg netbsd-2.0-xen-sparse/nbconfig-xen
index f509a1b8ddbdb9c3fe7357a1eef8703573a09002..98c6cfa98b2e7e002ffe71515751952c4dd09c41 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
@@ -159,6 +159,7 @@ void netif_create(netif_be_create_t *create)
     dev->get_stats       = netif_be_get_stats;
     dev->open            = net_open;
     dev->stop            = net_close;
+    dev->features        = NETIF_F_NO_CSUM;
 
     /* Disable queuing. */
     dev->tx_queue_len = 0;
index ebada3721ab09eed4bde4d6c858deb98b332f0eb..6f92f3d939b14159557758acad0dc3606eb8abb3 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/loopback.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/loopback.c
@@ -67,6 +67,11 @@ static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
     np->stats.rx_bytes += skb->len;
     np->stats.rx_packets++;
 
+    if ( skb->ip_summed == CHECKSUM_HW )
+        skb->proto_csum_blank = 1;
+    skb->ip_summed = skb->proto_csum_valid ?
+        CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
     skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
     skb->protocol = eth_type_trans(skb, dev);
     skb->dev      = dev;
@@ -95,6 +100,8 @@ static void loopback_construct(struct net_device *dev, struct net_device *lo)
 
     dev->tx_queue_len    = 0;
 
+    dev->features        = NETIF_F_HIGHDMA | NETIF_F_LLTX;
+
     /*
      * We do not set a jumbo MTU on the interface. Otherwise the network
      * stack will try to send large packets that will get dropped by the
@@ -118,6 +125,9 @@ static int __init loopback_init(void)
     loopback_construct(dev1, dev2);
     loopback_construct(dev2, dev1);
 
+    dev1->features |= NETIF_F_NO_CSUM;
+    dev2->features |= NETIF_F_IP_CSUM;
+
     /*
      * Initialise a dummy MAC address for the 'dummy backend' interface. We
      * choose the numerically largest non-broadcast address to prevent the
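
The asymmetric feature flags above (NETIF_F_NO_CSUM on one end,
NETIF_F_IP_CSUM on the other) control whether the generic transmit path
falls back to software checksumming. Roughly, the stock 2.6-era
dev_queue_xmit() gate looks like the following sketch (paraphrased, not
quoted; consult net/core/dev.c for the exact test):

    if (skb->ip_summed == CHECKSUM_HW &&
        !(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
        (!(dev->features & NETIF_F_IP_CSUM) ||
         skb->protocol != htons(ETH_P_IP))) {
            /* Device cannot checksum this packet: do it in software. */
            if (skb_checksum_help(skb))
                    goto out_kfree_skb;
    }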
index d08c296a028c8ab4cf492e8f7b8953a03e0b1db5..2631bd1399331848bfb01b278798e83b2f14bccf 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
@@ -27,7 +27,8 @@ static int  make_rx_response(netif_t *netif,
                              u16      id, 
                              s8       st,
                              memory_t addr,
-                             u16      size);
+                             u16      size,
+                             u16      csum_valid);
 
 static void net_tx_action(unsigned long unused);
 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
@@ -154,6 +155,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
         __skb_put(nskb, skb->len);
         (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
         nskb->dev = skb->dev;
+        nskb->proto_csum_valid = skb->proto_csum_valid;
         dev_kfree_skb(skb);
         skb = nskb;
     }
@@ -308,7 +310,8 @@ static void net_rx_action(unsigned long unused)
 
         evtchn = netif->evtchn;
         id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
-        if ( make_rx_response(netif, id, status, mdata, size) &&
+        if ( make_rx_response(netif, id, status, mdata,
+                              size, skb->proto_csum_valid) &&
              (rx_notify[evtchn] == 0) )
         {
             rx_notify[evtchn] = 1;
@@ -646,6 +649,11 @@ static void net_tx_action(unsigned long unused)
         skb->dev      = netif->dev;
         skb->protocol = eth_type_trans(skb, skb->dev);
 
+        /* No checking needed on localhost, but remember the field is blank. */
+        skb->ip_summed        = CHECKSUM_UNNECESSARY;
+        skb->proto_csum_valid = 1;
+        skb->proto_csum_blank = txreq.csum_blank;
+
         netif->stats.rx_bytes += txreq.size;
         netif->stats.rx_packets++;
 
@@ -711,15 +719,17 @@ static int make_rx_response(netif_t *netif,
                             u16      id, 
                             s8       st,
                             memory_t addr,
-                            u16      size)
+                            u16      size,
+                            u16      csum_valid)
 {
     NETIF_RING_IDX i = netif->rx_resp_prod;
     netif_rx_response_t *resp;
 
     resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-    resp->addr   = addr;
-    resp->id     = id;
-    resp->status = (s16)size;
+    resp->addr       = addr;
+    resp->csum_valid = csum_valid;
+    resp->id         = id;
+    resp->status     = (s16)size;
     if ( st < 0 )
         resp->status = (s16)st;
     wmb();
index 97f3609fcd07f61e9f5cbd206f19d79c5e902b70..d8a14d2319b295fb3f27a8ec74ce5699e2737103 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
@@ -473,6 +473,7 @@ static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
     tx->id   = id;
     tx->addr = virt_to_machine(skb->data);
     tx->size = skb->len;
+    tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
 
     wmb(); /* Ensure that backend will see the request. */
     np->tx->req_prod = i + 1;
@@ -573,6 +574,9 @@ static int netif_poll(struct net_device *dev, int *pbudget)
         skb->len  = rx->status;
         skb->tail = skb->data + skb->len;
 
+        if ( rx->csum_valid )
+            skb->ip_summed = CHECKSUM_UNNECESSARY;
+
         np->stats.rx_packets++;
         np->stats.rx_bytes += rx->status;
 
@@ -967,7 +971,8 @@ static int create_netdev(int handle, struct net_device **val)
     dev->get_stats       = network_get_stats;
     dev->poll            = netif_poll;
     dev->weight          = 64;
-    
+    dev->features        = NETIF_F_IP_CSUM;
+
     if ((err = register_netdev(dev)) != 0) {
         printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
         goto exit;
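
On the purely local path the backend hands packets up as
CHECKSUM_UNNECESSARY with proto_csum_blank set, so nothing is verified
between domains; the blank checksum field only has to be filled in when
the packet is forwarded to a real device or another checking consumer. A
sketch of what that fix-up plausibly looks like (illustrative only; the
actual logic lives in the modified net/core files, whose diffs are shown
truncated below):

    /* Sketch: convert a blank protocol checksum into an ordinary
     * hardware-checksum request so skb_checksum_help() fills it in. */
    if (skb->proto_csum_blank && skb->protocol == htons(ETH_P_IP)) {
            struct iphdr *iph = skb->nh.iph;

            skb->h.raw = (unsigned char *)iph + 4 * iph->ihl;
            switch (iph->protocol) {
            case IPPROTO_TCP:
                    skb->csum = offsetof(struct tcphdr, check);
                    break;
            case IPPROTO_UDP:
                    skb->csum = offsetof(struct udphdr, check);
                    break;
            }
            skb->ip_summed = CHECKSUM_HW;
            skb->proto_csum_blank = 0;
    }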
diff --git a/linux-2.6.11-xen-sparse/include/linux/skbuff.h b/linux-2.6.11-xen-sparse/include/linux/skbuff.h
new file mode 100644
index 0000000..dad5d9b
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/include/linux/skbuff.h
@@ -0,0 +1,1184 @@
+/*
+ *     Definitions for the 'struct sk_buff' memory handlers.
+ *
+ *     Authors:
+ *             Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *             Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/cache.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/poll.h>
+#include <linux/net.h>
+#include <net/checksum.h>
+
+#define HAVE_ALLOC_SKB         /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB     /* Ditto 8)                */
+#define SLAB_SKB               /* Slabified skbuffs       */
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+#define SKB_DATA_ALIGN(X)      (((X) + (SMP_CACHE_BYTES - 1)) & \
+                                ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER)        (((PAGE_SIZE << (ORDER)) - (X) - \
+                                 sizeof(struct skb_shared_info)) & \
+                                 ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_HEAD(X)                (SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC          (SKB_MAX_ORDER(0, 2))
+
+/* A. Checksumming of received packets by device.
+ *
+ *     NONE: device failed to checksum this packet.
+ *             skb->csum is undefined.
+ *
+ *     UNNECESSARY: device parsed the packet and verified the checksum.
+ *             skb->csum is undefined.
+ *           This is a bad option but, unfortunately, many vendors do it,
+ *           apparently with the secret goal of selling you a new device
+ *           when you add a new protocol to your host, e.g. IPv6. 8)
+ *
+ *     HW: the most generic way. Device supplied checksum of _all_
+ *         the packet as seen by netif_rx in skb->csum.
+ *         NOTE: Even if device supports only some protocols, but
+ *         is able to produce some skb->csum, it MUST use HW,
+ *         not UNNECESSARY.
+ *
+ * B. Checksumming on output.
+ *
+ *     NONE: skb is checksummed by protocol or csum is not required.
+ *
+ *     HW: device is required to csum packet as seen by hard_start_xmit
+ *     from skb->h.raw to the end and to record the checksum
+ *     at skb->h.raw+skb->csum.
+ *
+ *     Device must show its capabilities in dev->features, set
+ *     at device setup time.
+ *     NETIF_F_HW_CSUM - a clever device; it is able to checksum
+ *                       everything.
+ *     NETIF_F_NO_CSUM - loopback or reliable single-hop media.
+ *     NETIF_F_IP_CSUM - the device is dumb; it can csum only
+ *                       TCP/UDP over IPv4. Sigh. Vendors like it this
+ *                       way for unknown reasons. Though, see the comment
+ *                       above about CHECKSUM_UNNECESSARY. 8)
+ *
+ *     Any questions? No questions, good.              --ANK
+ */
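
To make the receive-side convention concrete, a driver's rx path picks
one of the three values roughly as follows (illustrative sketch;
hw_verified, hw_gave_full_csum and hw_csum stand in for device-specific
results):

    skb->ip_summed = CHECKSUM_NONE;            /* device did nothing   */
    if (hw_verified)
            skb->ip_summed = CHECKSUM_UNNECESSARY; /* device checked it */
    else if (hw_gave_full_csum) {
            skb->csum      = hw_csum;          /* raw sum of the packet */
            skb->ip_summed = CHECKSUM_HW;
    }
    netif_rx(skb);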
+
+#ifdef __i386__
+#define NET_CALLER(arg) (*(((void **)&arg) - 1))
+#else
+#define NET_CALLER(arg) __builtin_return_address(0)
+#endif
+
+struct net_device;
+
+#ifdef CONFIG_NETFILTER
+struct nf_conntrack {
+       atomic_t use;
+       void (*destroy)(struct nf_conntrack *);
+};
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+struct nf_bridge_info {
+       atomic_t use;
+       struct net_device *physindev;
+       struct net_device *physoutdev;
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+       struct net_device *netoutdev;
+#endif
+       unsigned int mask;
+       unsigned long data[32 / sizeof(unsigned long)];
+};
+#endif
+
+#endif
+
+struct sk_buff_head {
+       /* These two members must be first. */
+       struct sk_buff  *next;
+       struct sk_buff  *prev;
+
+       __u32           qlen;
+       spinlock_t      lock;
+};
+
+struct sk_buff;
+
+/* To allow a 64K frame to be packed as a single skb without frag_list */
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+
+typedef struct skb_frag_struct skb_frag_t;
+
+struct skb_frag_struct {
+       struct page *page;
+       __u16 page_offset;
+       __u16 size;
+};
+
+/* This data is invariant across clones and lives at
+ * the end of the header data, ie. at skb->end.
+ */
+struct skb_shared_info {
+       atomic_t        dataref;
+       unsigned int    nr_frags;
+       unsigned short  tso_size;
+       unsigned short  tso_segs;
+       struct sk_buff  *frag_list;
+       skb_frag_t      frags[MAX_SKB_FRAGS];
+};
+
+/** 
+ *     struct sk_buff - socket buffer
+ *     @next: Next buffer in list
+ *     @prev: Previous buffer in list
+ *     @list: List we are on
+ *     @sk: Socket we are owned by
+ *     @stamp: Time we arrived
+ *     @dev: Device we arrived on/are leaving by
+ *     @input_dev: Device we arrived on
+ *      @real_dev: The real device we are using
+ *     @h: Transport layer header
+ *     @nh: Network layer header
+ *     @mac: Link layer header
+ *     @dst: FIXME: Describe this field
+ *     @cb: Control buffer. Free for use by every layer. Put private vars here
+ *     @len: Length of actual data
+ *     @data_len: Data length
+ *     @mac_len: Length of link layer header
+ *     @csum: Checksum
+ *     @__unused: Dead field, may be reused
+ *     @cloned: Head may be cloned (check refcnt to be sure)
+ *     @proto_csum_valid: Protocol csum validated since arriving at localhost
+ *     @proto_csum_blank: Protocol csum must be added before leaving localhost
+ *     @pkt_type: Packet class
+ *     @ip_summed: Driver fed us an IP checksum
+ *     @priority: Packet queueing priority
+ *     @users: User count - see {datagram,tcp}.c
+ *     @protocol: Packet protocol from driver
+ *     @security: Security level of packet
+ *     @truesize: Buffer size 
+ *     @head: Head of buffer
+ *     @data: Data head pointer
+ *     @tail: Tail pointer
+ *     @end: End pointer
+ *     @destructor: Destruct function
+ *     @nfmark: Can be used for communication between hooks
+ *     @nfcache: Cache info
+ *     @nfct: Associated connection, if any
+ *     @nfctinfo: Relationship of this skb to the connection
+ *     @nf_debug: Netfilter debugging
+ *     @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
+ *      @private: Data which is private to the HIPPI implementation
+ *     @tc_index: Traffic control index
+ */
+
+struct sk_buff {
+       /* These two members must be first. */
+       struct sk_buff          *next;
+       struct sk_buff          *prev;
+
+       struct sk_buff_head     *list;
+       struct sock             *sk;
+       struct timeval          stamp;
+       struct net_device       *dev;
+       struct net_device       *input_dev;
+       struct net_device       *real_dev;
+
+       union {
+               struct tcphdr   *th;
+               struct udphdr   *uh;
+               struct icmphdr  *icmph;
+               struct igmphdr  *igmph;
+               struct iphdr    *ipiph;
+               struct ipv6hdr  *ipv6h;
+               unsigned char   *raw;
+       } h;
+
+       union {
+               struct iphdr    *iph;
+               struct ipv6hdr  *ipv6h;
+               struct arphdr   *arph;
+               unsigned char   *raw;
+       } nh;
+
+       union {
+               unsigned char   *raw;
+       } mac;
+
+       struct  dst_entry       *dst;
+       struct  sec_path        *sp;
+
+       /*
+        * This is the control buffer. It is free to use for every
+        * layer. Please put your private variables there. If you
+        * want to keep them across layers you have to do a skb_clone()
+        * first. This is owned by whoever has the skb queued ATM.
+        */
+       char                    cb[40];
+
+       unsigned int            len,
+                               data_len,
+                               mac_len,
+                               csum;
+       unsigned char           local_df,
+                               cloned:1,
+                               proto_csum_valid:1,
+                               proto_csum_blank:1,
+                               pkt_type,
+                               ip_summed;
+       __u32                   priority;
+       unsigned short          protocol,
+                               security;
+
+       void                    (*destructor)(struct sk_buff *skb);
+#ifdef CONFIG_NETFILTER
+        unsigned long          nfmark;
+       __u32                   nfcache;
+       __u32                   nfctinfo;
+       struct nf_conntrack     *nfct;
+#ifdef CONFIG_NETFILTER_DEBUG
+        unsigned int           nf_debug;
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       struct nf_bridge_info   *nf_bridge;
+#endif
+#endif /* CONFIG_NETFILTER */
+#if defined(CONFIG_HIPPI)
+       union {
+               __u32           ifield;
+       } private;
+#endif
+#ifdef CONFIG_NET_SCHED
+       __u32                   tc_index;        /* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+       __u32           tc_verd;               /* traffic control verdict */
+       __u32           tc_classid;            /* traffic control classid */
+#endif
+
+#endif
+
+
+       /* These elements must be at the end, see alloc_skb() for details.  */
+       unsigned int            truesize;
+       atomic_t                users;
+       unsigned char           *head,
+                               *data,
+                               *tail,
+                               *end;
+};
+
+#ifdef __KERNEL__
+/*
+ *     Handling routines are only of interest to the kernel
+ */
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+extern void           __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                           unsigned int size, int priority);
+extern void           kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int            pskb_expand_head(struct sk_buff *skb,
+                                       int nhead, int ntail, int gfp_mask);
+extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+                                           unsigned int headroom);
+extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+                                      int newheadroom, int newtailroom,
+                                      int priority);
+extern struct sk_buff *                skb_pad(struct sk_buff *skb, int pad);
+#define dev_kfree_skb(a)       kfree_skb(a)
+extern void          skb_over_panic(struct sk_buff *skb, int len,
+                                    void *here);
+extern void          skb_under_panic(struct sk_buff *skb, int len,
+                                     void *here);
+
+/* Internal */
+#define skb_shinfo(SKB)                ((struct skb_shared_info *)((SKB)->end))
+
+/**
+ *     skb_queue_empty - check if a queue is empty
+ *     @list: queue head
+ *
+ *     Returns true if the queue is empty, false otherwise.
+ */
+static inline int skb_queue_empty(const struct sk_buff_head *list)
+{
+       return list->next == (struct sk_buff *)list;
+}
+
+/**
+ *     skb_get - reference buffer
+ *     @skb: buffer to reference
+ *
+ *     Makes another reference to a socket buffer and returns a pointer
+ *     to the buffer.
+ */
+static inline struct sk_buff *skb_get(struct sk_buff *skb)
+{
+       atomic_inc(&skb->users);
+       return skb;
+}
+
+/*
+ * If users == 1, we are the only owner and can avoid a redundant
+ * atomic change.
+ */
+
+/**
+ *     kfree_skb - free an sk_buff
+ *     @skb: buffer to free
+ *
+ *     Drop a reference to the buffer and free it if the usage count has
+ *     hit zero.
+ */
+static inline void kfree_skb(struct sk_buff *skb)
+{
+       if (likely(atomic_read(&skb->users) == 1))
+               smp_rmb();
+       else if (likely(!atomic_dec_and_test(&skb->users)))
+               return;
+       __kfree_skb(skb);
+}
+
+/**
+ *     skb_cloned - is the buffer a clone
+ *     @skb: buffer to check
+ *
+ *     Returns true if the buffer was generated with skb_clone() and is
+ *     one of multiple shared copies of the buffer. Cloned buffers are
+ *     shared data so must not be written to under normal circumstances.
+ */
+static inline int skb_cloned(const struct sk_buff *skb)
+{
+       return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
+}
+
+/**
+ *     skb_shared - is the buffer shared
+ *     @skb: buffer to check
+ *
+ *     Returns true if more than one person has a reference to this
+ *     buffer.
+ */
+static inline int skb_shared(const struct sk_buff *skb)
+{
+       return atomic_read(&skb->users) != 1;
+}
+
+/**
+ *     skb_share_check - check if buffer is shared and if so clone it
+ *     @skb: buffer to check
+ *     @pri: priority for memory allocation
+ *
+ *     If the buffer is shared the buffer is cloned and the old copy
+ *     drops a reference. A new clone with a single reference is returned.
+ *     If the buffer is not shared the original buffer is returned. When
+ *     called from interrupt context or with spinlocks held, @pri must
+ *     be %GFP_ATOMIC.
+ *
+ *     NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+{
+       might_sleep_if(pri & __GFP_WAIT);
+       if (skb_shared(skb)) {
+               struct sk_buff *nskb = skb_clone(skb, pri);
+               kfree_skb(skb);
+               skb = nskb;
+       }
+       return skb;
+}
+
+/*
+ *     Copy shared buffers into a new sk_buff. We effectively do COW on
+ *     packets to handle cases where we have both a local reader and
+ *     forwarding, and a couple of other messy ones. The normal one is
+ *     tcpdumping a packet that's being forwarded.
+ */
+
+/**
+ *     skb_unshare - make a copy of a shared buffer
+ *     @skb: buffer to check
+ *     @pri: priority for memory allocation
+ *
+ *     If the socket buffer is a clone then this function creates a new
+ *     copy of the data, drops a reference count on the old copy and returns
+ *     the new copy with the reference count at 1. If the buffer is not a clone
+ *     the original buffer is returned. When called with a spinlock held or
+ *     from interrupt context, @pri must be %GFP_ATOMIC.
+ *
+ *     %NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+{
+       might_sleep_if(pri & __GFP_WAIT);
+       if (skb_cloned(skb)) {
+               struct sk_buff *nskb = skb_copy(skb, pri);
+               kfree_skb(skb); /* Free our shared copy */
+               skb = nskb;
+       }
+       return skb;
+}
+
+/**
+ *     skb_peek
+ *     @list_: list to peek at
+ *
+ *     Peek an &sk_buff. Unlike most other operations you _MUST_
+ *     be careful with this one. A peek leaves the buffer on the
+ *     list and someone else may run off with it. You must hold
+ *     the appropriate locks or have a private queue to do this.
+ *
+ *     Returns %NULL for an empty list or a pointer to the head element.
+ *     The reference count is not incremented and the reference is therefore
+ *     volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+       struct sk_buff *list = ((struct sk_buff *)list_)->next;
+       if (list == (struct sk_buff *)list_)
+               list = NULL;
+       return list;
+}
+
+/**
+ *     skb_peek_tail
+ *     @list_: list to peek at
+ *
+ *     Peek an &sk_buff. Unlike most other operations you _MUST_
+ *     be careful with this one. A peek leaves the buffer on the
+ *     list and someone else may run off with it. You must hold
+ *     the appropriate locks or have a private queue to do this.
+ *
+ *     Returns %NULL for an empty list or a pointer to the tail element.
+ *     The reference count is not incremented and the reference is therefore
+ *     volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+{
+       struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+       if (list == (struct sk_buff *)list_)
+               list = NULL;
+       return list;
+}
+
+/**
+ *     skb_queue_len   - get queue length
+ *     @list_: list to measure
+ *
+ *     Return the length of an &sk_buff queue.
+ */
+static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
+{
+       return list_->qlen;
+}
+
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+       spin_lock_init(&list->lock);
+       list->prev = list->next = (struct sk_buff *)list;
+       list->qlen = 0;
+}
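
Typical use of the non-atomic helpers that follow, on a queue private to
the caller (sketch; the caller is assumed to provide any serialization):

    struct sk_buff_head q;

    skb_queue_head_init(&q);
    __skb_queue_tail(&q, skb);      /* no locking: q is private */
    while ((skb = __skb_dequeue(&q)) != NULL)
            kfree_skb(skb);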
+
+/*
+ *     Insert an sk_buff at the start of a list.
+ *
+ *     The "__skb_xxxx()" functions are the non-atomic ones that
+ *     can only be called with interrupts disabled.
+ */
+
+/**
+ *     __skb_queue_head - queue a buffer at the list head
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the start of a list. This function takes no locks
+ *     and you must therefore hold required locks before calling it.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_head(struct sk_buff_head *list,
+                                   struct sk_buff *newsk)
+{
+       struct sk_buff *prev, *next;
+
+       newsk->list = list;
+       list->qlen++;
+       prev = (struct sk_buff *)list;
+       next = prev->next;
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+}
+
+/**
+ *     __skb_queue_tail - queue a buffer at the list tail
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the end of a list. This function takes no locks
+ *     and you must therefore hold required locks before calling it.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+                                  struct sk_buff *newsk)
+{
+       struct sk_buff *prev, *next;
+
+       newsk->list = list;
+       list->qlen++;
+       next = (struct sk_buff *)list;
+       prev = next->prev;
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+}
+
+
+/**
+ *     __skb_dequeue - remove from the head of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the head of the list. This function does not take any locks
+ *     so must be used with appropriate locks held only. The head item is
+ *     returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+       struct sk_buff *next, *prev, *result;
+
+       prev = (struct sk_buff *) list;
+       next = prev->next;
+       result = NULL;
+       if (next != prev) {
+               result       = next;
+               next         = next->next;
+               list->qlen--;
+               next->prev   = prev;
+               prev->next   = next;
+               result->next = result->prev = NULL;
+               result->list = NULL;
+       }
+       return result;
+}
+
+
+/*
+ *     Insert a packet on a list.
+ */
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+static inline void __skb_insert(struct sk_buff *newsk,
+                               struct sk_buff *prev, struct sk_buff *next,
+                               struct sk_buff_head *list)
+{
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+       newsk->list = list;
+       list->qlen++;
+}
+
+/*
+ *     Place a packet after a given packet in a list.
+ */
+extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+       __skb_insert(newsk, old, old->next, old->list);
+}
+
+/*
+ * Remove an sk_buff from a list. _Must_ be called atomically, and with
+ * the list known.
+ */
+extern void       skb_unlink(struct sk_buff *skb);
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+       struct sk_buff *next, *prev;
+
+       list->qlen--;
+       next       = skb->next;
+       prev       = skb->prev;
+       skb->next  = skb->prev = NULL;
+       skb->list  = NULL;
+       next->prev = prev;
+       prev->next = next;
+}
+
+
+/* XXX: more streamlined implementation */
+
+/**
+ *     __skb_dequeue_tail - remove from the tail of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the tail of the list. This function does not take any locks
+ *     so must be used with appropriate locks held only. The tail item is
+ *     returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+       struct sk_buff *skb = skb_peek_tail(list);
+       if (skb)
+               __skb_unlink(skb, list);
+       return skb;
+}
+
+
+static inline int skb_is_nonlinear(const struct sk_buff *skb)
+{
+       return skb->data_len;
+}
+
+static inline unsigned int skb_headlen(const struct sk_buff *skb)
+{
+       return skb->len - skb->data_len;
+}
+
+static inline int skb_pagelen(const struct sk_buff *skb)
+{
+       int i, len = 0;
+
+       for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
+               len += skb_shinfo(skb)->frags[i].size;
+       return len + skb_headlen(skb);
+}
+
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+                                     struct page *page, int off, int size)
+{
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+       frag->page                = page;
+       frag->page_offset         = off;
+       frag->size                = size;
+       skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+#define SKB_PAGE_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->nr_frags)
+#define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->frag_list)
+#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
+
+/*
+ *     Add data to an sk_buff
+ */
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+       unsigned char *tmp = skb->tail;
+       SKB_LINEAR_ASSERT(skb);
+       skb->tail += len;
+       skb->len  += len;
+       return tmp;
+}
+
+/**
+ *     skb_put - add data to a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer. If this would
+ *     exceed the total buffer size the kernel will panic. A pointer to the
+ *     first byte of the extra data is returned.
+ */
+static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+       unsigned char *tmp = skb->tail;
+       SKB_LINEAR_ASSERT(skb);
+       skb->tail += len;
+       skb->len  += len;
+       if (unlikely(skb->tail>skb->end))
+               skb_over_panic(skb, len, current_text_addr());
+       return tmp;
+}
+
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+       skb->data -= len;
+       skb->len  += len;
+       return skb->data;
+}
+
+/**
+ *     skb_push - add data to the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to add
+ *
+ *     This function extends the used data area of the buffer at the buffer
+ *     start. If this would exceed the total buffer headroom the kernel will
+ *     panic. A pointer to the first byte of the extra data is returned.
+ */
+static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+       skb->data -= len;
+       skb->len  += len;
+       if (unlikely(skb->data<skb->head))
+               skb_under_panic(skb, len, current_text_addr());
+       return skb->data;
+}
+
+static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+       skb->len -= len;
+       BUG_ON(skb->len < skb->data_len);
+       return skb->data += len;
+}
+
+/**
+ *     skb_pull - remove data from the start of a buffer
+ *     @skb: buffer to use
+ *     @len: amount of data to remove
+ *
+ *     This function removes data from the start of a buffer, returning
+ *     the memory to the headroom. A pointer to the next data in the buffer
+ *     is returned. Once the data has been pulled future pushes will overwrite
+ *     the old data.
+ */
+static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+       return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
+extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+       if (len > skb_headlen(skb) &&
+           !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+               return NULL;
+       skb->len -= len;
+       return skb->data += len;
+}
+
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+       return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+       if (likely(len <= skb_headlen(skb)))
+               return 1;
+       if (unlikely(len > skb->len))
+               return 0;
+       return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+}
+
+/**
+ *     skb_headroom - bytes at buffer head
+ *     @skb: buffer to check
+ *
+ *     Return the number of bytes of free space at the head of an &sk_buff.
+ */
+static inline int skb_headroom(const struct sk_buff *skb)
+{
+       return skb->data - skb->head;
+}
+
+/**
+ *     skb_tailroom - bytes at buffer end
+ *     @skb: buffer to check
+ *
+ *     Return the number of bytes of free space at the tail of an sk_buff
+ */
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+       return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
+}
+
+/**
+ *     skb_reserve - adjust headroom
+ *     @skb: buffer to alter
+ *     @len: bytes to move
+ *
+ *     Increase the headroom of an empty &sk_buff by reducing the tail
+ *     room. This is only allowed for an empty buffer.
+ */
+static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+{
+       skb->data += len;
+       skb->tail += len;
+}
+
+/*
+ * CPUs often take a performance hit when accessing unaligned memory
+ * locations. The actual performance hit varies, it can be small if the
+ * hardware handles it or large if we have to take an exception and fix it
+ * in software.
+ *
+ * Since an ethernet header is 14 bytes network drivers often end up with
+ * the IP header at an unaligned offset. The IP header can be aligned by
+ * shifting the start of the packet by 2 bytes. Drivers should do this
+ * with:
+ *
+ * skb_reserve(skb, NET_IP_ALIGN);
+ *
+ * The downside to this alignment of the IP header is that the DMA is now
+ * unaligned. On some architectures the cost of an unaligned DMA is high
+ * and this cost outweighs the gains made by aligning the IP header.
+ * 
+ * Since this trade off varies between architectures, we allow NET_IP_ALIGN
+ * to be overridden.
+ */
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN   2
+#endif
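
Putting the pieces together, a receive path that honours NET_IP_ALIGN
typically looks like this sketch (pkt_len and rx_buf are hypothetical
driver state):

    struct sk_buff *skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);

    if (skb != NULL) {
            skb_reserve(skb, NET_IP_ALIGN);  /* IP header lands aligned */
            memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
    }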
+
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (!skb->data_len) {
+               skb->len  = len;
+               skb->tail = skb->data + len;
+       } else
+               ___pskb_trim(skb, len, 0);
+}
+
+/**
+ *     skb_trim - remove end from a buffer
+ *     @skb: buffer to alter
+ *     @len: new length
+ *
+ *     Cut the length of a buffer down by removing data from the tail. If
+ *     the buffer is already under the length specified it is not modified.
+ */
+static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (skb->len > len)
+               __skb_trim(skb, len);
+}
+
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+       if (!skb->data_len) {
+               skb->len  = len;
+               skb->tail = skb->data+len;
+               return 0;
+       }
+       return ___pskb_trim(skb, len, 1);
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+       return (len < skb->len) ? __pskb_trim(skb, len) : 0;
+}
+
+/**
+ *     skb_orphan - orphan a buffer
+ *     @skb: buffer to orphan
+ *
+ *     If a buffer currently has an owner then we call the owner's
+ *     destructor function and make the @skb unowned. The buffer continues
+ *     to exist but is no longer charged to its former owner.
+ */
+static inline void skb_orphan(struct sk_buff *skb)
+{
+       if (skb->destructor)
+               skb->destructor(skb);
+       skb->destructor = NULL;
+       skb->sk         = NULL;
+}
+
+/**
+ *     __skb_queue_purge - empty a list
+ *     @list: list to empty
+ *
+ *     Delete all buffers on an &sk_buff list. Each buffer is removed from
+ *     the list and one reference dropped. This function does not take the
+ *     list lock and the caller must hold the relevant locks to use it.
+ */
+extern void skb_queue_purge(struct sk_buff_head *list);
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+       while ((skb = __skb_dequeue(list)) != NULL)
+               kfree_skb(skb);
+}
+
+/**
+ *     __dev_alloc_skb - allocate an skbuff for sending
+ *     @length: length to allocate
+ *     @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *     Allocate a new &sk_buff and assign it a usage count of one. The
+ *     buffer has unspecified headroom built in. Users should allocate
+ *     the headroom they think they need without accounting for the
+ *     built in space. The built in space is used for optimisations.
+ *
+ *     %NULL is returned if there is no free memory.
+ */
+#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+                                             int gfp_mask)
+{
+       struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
+       if (likely(skb))
+               skb_reserve(skb, 16);
+       return skb;
+}
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
+
+/**
+ *     dev_alloc_skb - allocate an skbuff for sending
+ *     @length: length to allocate
+ *
+ *     Allocate a new &sk_buff and assign it a usage count of one. The
+ *     buffer has unspecified headroom built in. Users should allocate
+ *     the headroom they think they need without accounting for the
+ *     built in space. The built in space is used for optimisations.
+ *
+ *     %NULL is returned if there is no free memory. Although this function
+ *     allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+       return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+/**
+ *     skb_cow - copy header of skb when it is required
+ *     @skb: buffer to cow
+ *     @headroom: needed headroom
+ *
+ *     If the skb passed lacks sufficient headroom or its data part
+ *     is shared, data is reallocated. If reallocation fails, an error
+ *     is returned and original skb is not changed.
+ *
+ *     The result is skb with writable area skb->head...skb->tail
+ *     and at least @headroom of space at head.
+ */
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+       int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+
+       if (delta < 0)
+               delta = 0;
+
+       if (delta || skb_cloned(skb))
+               return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
+       return 0;
+}
+
+/**
+ *     skb_padto       - pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Returns the buffer, which may be a replacement
+ *     for the original, or NULL for out of memory - in which case
+ *     the original buffer is still freed.
+ */
+static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+{
+       unsigned int size = skb->len;
+       if (likely(size >= len))
+               return skb;
+       return skb_pad(skb, len-size);
+}
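
Because a failed skb_padto() has already freed the original buffer, the
caller must not touch the old pointer on failure (sketch of use in a
driver's hard_start_xmit()):

    skb = skb_padto(skb, ETH_ZLEN);
    if (skb == NULL)
            return 0;   /* original skb already freed; treat as sent */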
+
+static inline int skb_add_data(struct sk_buff *skb,
+                              char __user *from, int copy)
+{
+       const int off = skb->len;
+
+       if (skb->ip_summed == CHECKSUM_NONE) {
+               int err = 0;
+               unsigned int csum = csum_and_copy_from_user(from,
+                                                           skb_put(skb, copy),
+                                                           copy, 0, &err);
+               if (!err) {
+                       skb->csum = csum_block_add(skb->csum, csum, off);
+                       return 0;
+               }
+       } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+               return 0;
+
+       __skb_trim(skb, off);
+       return -EFAULT;
+}
+
+static inline int skb_can_coalesce(struct sk_buff *skb, int i,
+                                  struct page *page, int off)
+{
+       if (i) {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+               return page == frag->page &&
+                      off == frag->page_offset + frag->size;
+       }
+       return 0;
+}
+
+/**
+ *     skb_linearize - convert paged skb to linear one
+ *     @skb: buffer to linearize
+ *     @gfp: allocation mode
+ *
+ *     If there is no free memory -ENOMEM is returned, otherwise zero
+ *     is returned and the old skb data released.
+ */
+extern int __skb_linearize(struct sk_buff *skb, int gfp);
+static inline int skb_linearize(struct sk_buff *skb, int gfp)
+{
+       return __skb_linearize(skb, gfp);
+}
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+       BUG_ON(in_irq());
+
+       local_bh_disable();
+#endif
+       return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+       kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+       local_bh_enable();
+#endif
+}
+
+#define skb_queue_walk(queue, skb) \
+               for (skb = (queue)->next;                                       \
+                    prefetch(skb->next), (skb != (struct sk_buff *)(queue));   \
+                    skb = skb->next)
+
+
+extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+                                        int noblock, int *err);
+extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
+                                    struct poll_table_struct *wait);
+extern int            skb_copy_datagram_iovec(const struct sk_buff *from,
+                                              int offset, struct iovec *to,
+                                              int size);
+extern int            skb_copy_and_csum_datagram_iovec(const
+                                                       struct sk_buff *skb,
+                                                       int hlen,
+                                                       struct iovec *iov);
+extern void           skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
+                                   int len, unsigned int csum);
+extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
+                                    void *to, int len);
+extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
+                                             int offset, u8 *to, int len,
+                                             unsigned int csum);
+extern void           skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+extern void           skb_split(struct sk_buff *skb,
+                                struct sk_buff *skb1, const u32 len);
+
+static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+                                      int len, void *buffer)
+{
+       int hlen = skb_headlen(skb);
+
+       if (offset + len <= hlen)
+               return skb->data + offset;
+
+       if (skb_copy_bits(skb, offset, buffer, len) < 0)
+               return NULL;
+
+       return buffer;
+}
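
skb_header_pointer() is the safe way to read a header that may live in
paged data; the caller supplies a stack buffer as fallback storage
(sketch):

    struct tcphdr _tcph, *th;

    th = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
                            sizeof(_tcph), &_tcph);
    if (th == NULL)
            return;     /* truncated packet */
    /* th points either into the skb or at the copy in _tcph */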
+
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
+struct skb_iter {
+       /* Iteration functions set these */
+       unsigned char *data;
+       unsigned int len;
+
+       /* Private to iteration */
+       unsigned int nextfrag;
+       struct sk_buff *fraglist;
+};
+
+/* Keep iterating until skb_iter_next returns false. */
+extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
+extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
+/* Call this if aborting loop before !skb_iter_next */
+extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
+
+#ifdef CONFIG_NETFILTER
+static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+       if (nfct && atomic_dec_and_test(&nfct->use))
+               nfct->destroy(nfct);
+}
+static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+{
+       if (nfct)
+               atomic_inc(&nfct->use);
+}
+static inline void nf_reset(struct sk_buff *skb)
+{
+       nf_conntrack_put(skb->nfct);
+       skb->nfct = NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+       skb->nf_debug = 0;
+#endif
+}
+static inline void nf_reset_debug(struct sk_buff *skb)
+{
+#ifdef CONFIG_NETFILTER_DEBUG
+       skb->nf_debug = 0;
+#endif
+}
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+{
+       if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
+               kfree(nf_bridge);
+}
+static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+{
+       if (nf_bridge)
+               atomic_inc(&nf_bridge->use);
+}
+#endif /* CONFIG_BRIDGE_NETFILTER */
+#else /* CONFIG_NETFILTER */
+static inline void nf_reset(struct sk_buff *skb) {}
+#endif /* CONFIG_NETFILTER */
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux-2.6.11-xen-sparse/net/core/dev.c b/linux-2.6.11-xen-sparse/net/core/dev.c
new file mode 100644
index 0000000..b5e12b0
--- /dev/null
+++ b/linux-2.6.11-xen-sparse/net/core/dev.c
@@ -0,0 +1,3389 @@
+/*
+ *     NET3    Protocol independent device support routines.
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ *     Derived from the non IP parts of dev.c 1.0.19
+ *             Authors:        Ross Biro, <bir7@leland.Stanford.Edu>
+ *                             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *                             Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ *     Additional Authors:
+ *             Florian la Roche <rzsfl@rz.uni-sb.de>
+ *             Alan Cox <gw4pts@gw4pts.ampr.org>
+ *             David Hinds <dahinds@users.sourceforge.net>
+ *             Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ *             Adam Sulmicki <adam@cfar.umd.edu>
+ *              Pekka Riikonen <priikone@poesidon.pspt.fi>
+ *
+ *     Changes:
+ *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
+ *                                     to 2 if register_netdev gets called
+ *                                     before net_dev_init & also removed a
+ *                                     few lines of code in the process.
+ *             Alan Cox        :       device private ioctl copies fields back.
+ *             Alan Cox        :       Transmit queue code does relevant
+ *                                     stunts to keep the queue safe.
+ *             Alan Cox        :       Fixed double lock.
+ *             Alan Cox        :       Fixed promisc NULL pointer trap
+ *             ????????        :       Support the full private ioctl range
+ *             Alan Cox        :       Moved ioctl permission check into
+ *                                     drivers
+ *             Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
+ *             Alan Cox        :       100 backlog just doesn't cut it when
+ *                                     you start doing multicast video 8)
+ *             Alan Cox        :       Rewrote net_bh and list manager.
+ *             Alan Cox        :       Fix ETH_P_ALL echoback lengths.
+ *             Alan Cox        :       Took out transmit every packet pass
+ *                                     Saved a few bytes in the ioctl handler
+ *             Alan Cox        :       Network driver sets packet type before
+ *                                     calling netif_rx. Saves a function
+ *                                     call a packet.
+ *             Alan Cox        :       Hashed net_bh()
+ *             Richard Kooijman:       Timestamp fixes.
+ *             Alan Cox        :       Wrong field in SIOCGIFDSTADDR
+ *             Alan Cox        :       Device lock protection.
+ *             Alan Cox        :       Fixed nasty side effect of device close
+ *                                     changes.
+ *             Rudi Cilibrasi  :       Pass the right thing to
+ *                                     set_mac_address()
+ *             Dave Miller     :       32bit quantity for the device lock to
+ *                                     make it work out on a Sparc.
+ *             Bjorn Ekwall    :       Added KERNELD hack.
+ *             Alan Cox        :       Cleaned up the backlog initialise.
+ *             Craig Metz      :       SIOCGIFCONF fix if space for under
+ *                                     1 device.
+ *         Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
+ *                                     is no device open function.
+ *             Andi Kleen      :       Fix error reporting for SIOCGIFCONF
+ *         Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
+ *             Cyrus Durgin    :       Cleaned for KMOD
+ *             Adam Sulmicki   :       Bug Fix : Network Device Unload
+ *                                     A network device unload needs to purge
+ *                                     the backlog queue.
+ *     Paul Rusty Russell      :       SIOCSIFNAME
+ *              Pekka Riikonen  :      Netdev boot-time settings code
+ *              Andrew Morton   :       Make unregister_netdevice wait
+ *                                     indefinitely on dev->refcnt
+ *             J Hadi Salim    :       - Backlog queue sampling
+ *                                     - netif_rx() feedback
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/config.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/rtnetlink.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/if_bridge.h>
+#include <linux/divert.h>
+#include <net/dst.h>
+#include <net/pkt_sched.h>
+#include <net/checksum.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/netpoll.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>            /* Note : will define WIRELESS_EXT */
+#include <net/iw_handler.h>
+#endif /* CONFIG_NET_RADIO */
+#include <asm/current.h>
+
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+
+/* This define, if set, will randomly drop a packet when congestion
+ * is more than moderate.  It helps fairness in the multi-interface
+ * case when one of them is a hog, but it kills performance for the
+ * single interface case so it is off now by default.
+ */
+#undef RAND_LIE
+
+/* Setting this will sample the queue lengths and thus congestion
+ * via a timer instead of as each packet is received.
+ */
+#undef OFFLINE_SAMPLE
+
+/*
+ *     The list of packet types we will receive (as opposed to discard)
+ *     and the routines to invoke.
+ *
+ *     Why 16. Because with 16 the only overlap we get on a hash of the
+ *     low nibble of the protocol value is RARP/SNAP/X.25.
+ *
+ *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
+ *             sure which should go first, but I bet it won't make much
+ *             difference if we are running VLANs.  The good news is that
+ *             this protocol won't be in the list unless compiled in, so
+ *             the average user (w/out VLANs) will not be adversely affected.
+ *             --BLG
+ *
+ *             0800    IP
+ *             8100    802.1Q VLAN
+ *             0001    802.3
+ *             0002    AX.25
+ *             0004    802.2
+ *             8035    RARP
+ *             0005    SNAP
+ *             0805    X.25
+ *             0806    ARP
+ *             8137    IPX
+ *             0009    Localtalk
+ *             86DD    IPv6
+ */
+
+static DEFINE_SPINLOCK(ptype_lock);
+static struct list_head ptype_base[16];        /* 16 way hashed list */
+static struct list_head ptype_all;             /* Taps */
+
+#ifdef OFFLINE_SAMPLE
+static void sample_queue(unsigned long dummy);
+static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
+#endif
+
+/*
+ * The @dev_base list is protected by @dev_base_lock and the rtnl
+ * semaphore.
+ *
+ * Pure readers hold dev_base_lock for reading.
+ *
+ * Writers must hold the rtnl semaphore while they loop through the
+ * dev_base list, and hold dev_base_lock for writing when they do the
+ * actual updates.  This allows pure readers to access the list even
+ * while a writer is preparing to update it.
+ *
+ * To put it another way, dev_base_lock is held for writing only to
+ * protect against pure readers; the rtnl semaphore provides the
+ * protection against other writers.
+ *
+ * See, for example usages, register_netdevice() and
+ * unregister_netdevice(), which must be called with the rtnl
+ * semaphore held.
+ */
+struct net_device *dev_base;
+static struct net_device **dev_tail = &dev_base;
+DEFINE_RWLOCK(dev_base_lock);
+
+EXPORT_SYMBOL(dev_base);
+EXPORT_SYMBOL(dev_base_lock);
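
A pure reader following the rules above takes only dev_base_lock
(sketch):

    struct net_device *dev;

    read_lock(&dev_base_lock);
    for (dev = dev_base; dev; dev = dev->next)
            printk(KERN_DEBUG "dev: %s\n", dev->name);
    read_unlock(&dev_base_lock);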
+
+#define NETDEV_HASHBITS        8
+static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
+static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
+
+static inline struct hlist_head *dev_name_hash(const char *name)
+{
+       unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+       return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
+}
+
+static inline struct hlist_head *dev_index_hash(int ifindex)
+{
+       return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
+}
+
+/*
+ *     Our notifier list
+ */
+
+static struct notifier_block *netdev_chain;
+
+/*
+ *     Device drivers call our routines to queue packets here. We empty the
+ *     queue in the local softnet handler.
+ */
+DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
+
+#ifdef CONFIG_SYSFS
+extern int netdev_sysfs_init(void);
+extern int netdev_register_sysfs(struct net_device *);
+extern void netdev_unregister_sysfs(struct net_device *);
+#else
+#define netdev_sysfs_init()            (0)
+#define netdev_register_sysfs(dev)     (0)
+#define        netdev_unregister_sysfs(dev)    do { } while(0)
+#endif
+
+
+/*******************************************************************************
+
+               Protocol management and registration routines
+
+*******************************************************************************/
+
+/*
+ *     For efficiency
+ */
+
+int netdev_nit;
+
+/*
+ *     Add a protocol ID to the list. Now that the input handler is
+ *     smarter we can dispense with all the messy stuff that used to be
+ *     here.
+ *
+ *     BEWARE!!! Protocol handlers that mangle input packets MUST BE
+ *     last in the hash buckets, and checking protocol handlers MUST
+ *     start from the promiscuous ptype_all chain in net_bh.
+ *     This holds today; do not change it.
+ *     Explanation: if a packet-mangling protocol handler were first on
+ *     the list, it would have no way to sense that the packet is cloned
+ *     and should be copied-on-write, so it would modify the data and
+ *     subsequent readers would get a broken packet.
+ *                                                     --ANK (980803)
+ */
+
+/**
+ *     dev_add_pack - add packet handler
+ *     @pt: packet type declaration
+ *
+ *     Add a protocol handler to the networking stack. The passed &packet_type
+ *     is linked into kernel lists and may not be freed until it has been
+ *     removed from the kernel lists.
+ *
+ *     This call does not sleep, so it cannot guarantee that CPUs
+ *     in the middle of receiving a packet will see the new packet
+ *     type (until the next received packet).
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+       int hash;
+
+       spin_lock_bh(&ptype_lock);
+       if (pt->type == htons(ETH_P_ALL)) {
+               netdev_nit++;
+               list_add_rcu(&pt->list, &ptype_all);
+       } else {
+               hash = ntohs(pt->type) & 15;
+               list_add_rcu(&pt->list, &ptype_base[hash]);
+       }
+       spin_unlock_bh(&ptype_lock);
+}
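+
+/*
+ * Example usage (a sketch only; my_ipv4_rcv and my_ipv4_pt are made-up
+ * names): register a handler for every incoming IPv4 frame, and remove
+ * it again when done.
+ *
+ *     static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
+ *                            struct packet_type *pt)
+ *     {
+ *             kfree_skb(skb);
+ *             return 0;
+ *     }
+ *
+ *     static struct packet_type my_ipv4_pt = {
+ *             .type = __constant_htons(ETH_P_IP),
+ *             .func = my_ipv4_rcv,
+ *     };
+ *
+ *     dev_add_pack(&my_ipv4_pt);
+ *     ...
+ *     dev_remove_pack(&my_ipv4_pt);
+ */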
+
+extern void linkwatch_run_queue(void);
+
+
+
+/**
+ *     __dev_remove_pack        - remove packet handler
+ *     @pt: packet type declaration
+ *
+ *     Remove a protocol handler that was previously added to the kernel
+ *     protocol handlers by dev_add_pack(). The passed &packet_type is removed
+ *     from the kernel lists and can be freed or reused once this function
+ *     returns. 
+ *
+ *     The packet type might still be in use by receivers
+ *     and must not be freed until after all the CPUs have gone
+ *     through a quiescent state.
+ */
+void __dev_remove_pack(struct packet_type *pt)
+{
+       struct list_head *head;
+       struct packet_type *pt1;
+
+       spin_lock_bh(&ptype_lock);
+
+       if (pt->type == htons(ETH_P_ALL)) {
+               netdev_nit--;
+               head = &ptype_all;
+       } else
+               head = &ptype_base[ntohs(pt->type) & 15];
+
+       list_for_each_entry(pt1, head, list) {
+               if (pt == pt1) {
+                       list_del_rcu(&pt->list);
+                       goto out;
+               }
+       }
+
+       printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+out:
+       spin_unlock_bh(&ptype_lock);
+}
+/**
+ *     dev_remove_pack  - remove packet handler
+ *     @pt: packet type declaration
+ *
+ *     Remove a protocol handler that was previously added to the kernel
+ *     protocol handlers by dev_add_pack(). The passed &packet_type is removed
+ *     from the kernel lists and can be freed or reused once this function
+ *     returns.
+ *
+ *     This call sleeps to guarantee that no CPU is looking at the packet
+ *     type after return.
+ */
+void dev_remove_pack(struct packet_type *pt)
+{
+       __dev_remove_pack(pt);
+       
+       synchronize_net();
+}
+
+/******************************************************************************
+
+                     Device Boot-time Settings Routines
+
+*******************************************************************************/
+
+/* Boot time configuration table */
+static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
+
+/**
+ *     netdev_boot_setup_add   - add new setup entry
+ *     @name: name of the device
+ *     @map: configured settings for the device
+ *
+ *     Adds a new setup entry to the dev_boot_setup list.  The function
+ *     returns 0 on error and 1 on success.  This is a generic routine
+ *     for all netdevices.
+ */
+static int netdev_boot_setup_add(char *name, struct ifmap *map)
+{
+       struct netdev_boot_setup *s;
+       int i;
+
+       s = dev_boot_setup;
+       for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+               if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
+                       memset(s[i].name, 0, sizeof(s[i].name));
+                       strcpy(s[i].name, name);
+                       memcpy(&s[i].map, map, sizeof(s[i].map));
+                       break;
+               }
+       }
+
+       return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
+}
+
+/**
+ *     netdev_boot_setup_check - check boot time settings
+ *     @dev: the netdevice
+ *
+ *     Check boot time settings for the device.
+ *     Any settings found are applied to the device, to be used
+ *     later in device probing.
+ *     Returns 0 if no settings are found, 1 if they are.
+ */
+int netdev_boot_setup_check(struct net_device *dev)
+{
+       struct netdev_boot_setup *s = dev_boot_setup;
+       int i;
+
+       for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+               if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
+                   !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
+                       dev->irq        = s[i].map.irq;
+                       dev->base_addr  = s[i].map.base_addr;
+                       dev->mem_start  = s[i].map.mem_start;
+                       dev->mem_end    = s[i].map.mem_end;
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+
+/**
+ *     netdev_boot_base        - get address from boot time settings
+ *     @prefix: prefix for network device
+ *     @unit: id for network device
+ *
+ *     Check the boot time settings for the base address of the device.
+ *     Returns the configured base address, 1 if the device is already
+ *     registered, or 0 if no setting is found.
+ */
+unsigned long netdev_boot_base(const char *prefix, int unit)
+{
+       const struct netdev_boot_setup *s = dev_boot_setup;
+       char name[IFNAMSIZ];
+       int i;
+
+       sprintf(name, "%s%d", prefix, unit);
+
+       /*
+        * If the device is already registered, return a base of 1
+        * to indicate not to probe for this interface.
+        */
+       if (__dev_get_by_name(name))
+               return 1;
+
+       for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
+               if (!strcmp(name, s[i].name))
+                       return s[i].map.base_addr;
+       return 0;
+}
+
+/*
+ * Saves at boot time configured settings for any netdevice.
+ */
+int __init netdev_boot_setup(char *str)
+{
+       int ints[5];
+       struct ifmap map;
+
+       str = get_options(str, ARRAY_SIZE(ints), ints);
+       if (!str || !*str)
+               return 0;
+
+       /* Save settings */
+       memset(&map, 0, sizeof(map));
+       if (ints[0] > 0)
+               map.irq = ints[1];
+       if (ints[0] > 1)
+               map.base_addr = ints[2];
+       if (ints[0] > 2)
+               map.mem_start = ints[3];
+       if (ints[0] > 3)
+               map.mem_end = ints[4];
+
+       /* Add new entry to the list */
+       return netdev_boot_setup_add(str, &map);
+}
+
+__setup("netdev=", netdev_boot_setup);
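+
+/*
+ * Example (sketch): booting with
+ *
+ *     netdev=9,0x300,0,0,eth0
+ *
+ * is parsed above as irq=9, base_addr=0x300, mem_start=0, mem_end=0,
+ * with the remaining string "eth0" passed to netdev_boot_setup_add()
+ * as the device name.
+ */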
+
+/*******************************************************************************
+
+                           Device Interface Subroutines
+
+*******************************************************************************/
+
+/**
+ *     __dev_get_by_name       - find a device by its name
+ *     @name: name to find
+ *
+ *     Find an interface by name. Must be called under the RTNL semaphore
+ *     or @dev_base_lock. If the name is found a pointer to the device
+ *     is returned. If the name is not found then %NULL is returned. The
+ *     reference counters are not incremented so the caller must be
+ *     careful with locks.
+ */
+
+struct net_device *__dev_get_by_name(const char *name)
+{
+       struct hlist_node *p;
+
+       hlist_for_each(p, dev_name_hash(name)) {
+               struct net_device *dev
+                       = hlist_entry(p, struct net_device, name_hlist);
+               if (!strncmp(dev->name, name, IFNAMSIZ))
+                       return dev;
+       }
+       return NULL;
+}
+
+/**
+ *     dev_get_by_name         - find a device by its name
+ *     @name: name to find
+ *
+ *     Find an interface by name. This can be called from any
+ *     context and does its own locking. The returned handle has
+ *     the usage count incremented and the caller must use dev_put() to
+ *     release it when it is no longer needed. %NULL is returned if no
+ *     matching device is found.
+ */
+
+struct net_device *dev_get_by_name(const char *name)
+{
+       struct net_device *dev;
+
+       read_lock(&dev_base_lock);
+       dev = __dev_get_by_name(name);
+       if (dev)
+               dev_hold(dev);
+       read_unlock(&dev_base_lock);
+       return dev;
+}
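+
+/*
+ * Example usage (a sketch only; "eth0" is just an assumed name):
+ *
+ *     struct net_device *dev = dev_get_by_name("eth0");
+ *     if (dev) {
+ *             ...
+ *             dev_put(dev);
+ *     }
+ */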
+
+/**
+ *     __dev_get_by_index - find a device by its ifindex
+ *     @ifindex: index of device
+ *
+ *     Search for an interface by index. Returns a pointer to the device,
+ *     or %NULL if it is not found. The device has not
+ *     had its reference counter increased so the caller must be careful
+ *     about locking. The caller must hold either the RTNL semaphore
+ *     or @dev_base_lock.
+ */
+
+struct net_device *__dev_get_by_index(int ifindex)
+{
+       struct hlist_node *p;
+
+       hlist_for_each(p, dev_index_hash(ifindex)) {
+               struct net_device *dev
+                       = hlist_entry(p, struct net_device, index_hlist);
+               if (dev->ifindex == ifindex)
+                       return dev;
+       }
+       return NULL;
+}
+
+
+/**
+ *     dev_get_by_index - find a device by its ifindex
+ *     @ifindex: index of device
+ *
+ *     Search for an interface by index. Returns a pointer to the device,
+ *     or NULL if it is not found. The device returned has
+ *     had a reference added and the pointer is safe until the user calls
+ *     dev_put to indicate they have finished with it.
+ */
+
+struct net_device *dev_get_by_index(int ifindex)
+{
+       struct net_device *dev;
+
+       read_lock(&dev_base_lock);
+       dev = __dev_get_by_index(ifindex);
+       if (dev)
+               dev_hold(dev);
+       read_unlock(&dev_base_lock);
+       return dev;
+}
+
+/**
+ *     dev_getbyhwaddr - find a device by its hardware address
+ *     @type: media type of device
+ *     @ha: hardware address
+ *
+ *     Search for an interface by MAC address. Returns a pointer to the
+ *     device, or NULL if it is not found. The caller must hold the
+ *     rtnl semaphore. The returned device has not had its ref count
+ *     increased and the caller must therefore be careful about locking.
+ *
+ *     BUGS:
+ *     If the API was consistent this would be __dev_get_by_hwaddr
+ */
+
+struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
+{
+       struct net_device *dev;
+
+       ASSERT_RTNL();
+
+       for (dev = dev_base; dev; dev = dev->next)
+               if (dev->type == type &&
+                   !memcmp(dev->dev_addr, ha, dev->addr_len))
+                       break;
+       return dev;
+}
+
+struct net_device *dev_getfirstbyhwtype(unsigned short type)
+{
+       struct net_device *dev;
+
+       rtnl_lock();
+       for (dev = dev_base; dev; dev = dev->next) {
+               if (dev->type == type) {
+                       dev_hold(dev);
+                       break;
+               }
+       }
+       rtnl_unlock();
+       return dev;
+}
+
+EXPORT_SYMBOL(dev_getfirstbyhwtype);
+
+/**
+ *     dev_get_by_flags - find any device with given flags
+ *     @if_flags: IFF_* values
+ *     @mask: bitmask of bits in if_flags to check
+ *
+ *     Search for any interface with the given flags. Returns a pointer
+ *     to the first matching device, or NULL if none is found. The device
+ *     returned has had a reference added and the pointer is safe until
+ *     the user calls dev_put to indicate they have finished with it.
+ */
+
+struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
+{
+       struct net_device *dev;
+
+       read_lock(&dev_base_lock);
+       for (dev = dev_base; dev != NULL; dev = dev->next) {
+               if (((dev->flags ^ if_flags) & mask) == 0) {
+                       dev_hold(dev);
+                       break;
+               }
+       }
+       read_unlock(&dev_base_lock);
+       return dev;
+}
+
+/**
+ *     dev_valid_name - check if name is okay for network device
+ *     @name: name string
+ *
+ *     Network device names need to be valid file names
+ *     to allow sysfs to work.
+ */
+static int dev_valid_name(const char *name)
+{
+       return !(*name == '\0' 
+                || !strcmp(name, ".")
+                || !strcmp(name, "..")
+                || strchr(name, '/'));
+}
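+
+/*
+ * For example, "eth0" and "dummy%d" pass this check, while "", ".",
+ * ".." and "fast/slow" are rejected.
+ */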
+
+/**
+ *     dev_alloc_name - allocate a name for a device
+ *     @dev: device
+ *     @name: name format string
+ *
+ *     Passed a format string, e.g. "lt%d", it will try to find a suitable
+ *     id. Not efficient for many devices, not called a lot. The caller
+ *     must hold the dev_base or rtnl lock while allocating the name and
+ *     adding the device in order to avoid duplicates. Returns the number
+ *     of the unit assigned or a negative errno code.
+ */
+
+int dev_alloc_name(struct net_device *dev, const char *name)
+{
+       int i = 0;
+       char buf[IFNAMSIZ];
+       const char *p;
+       const int max_netdevices = 8*PAGE_SIZE;
+       long *inuse;
+       struct net_device *d;
+
+       p = strnchr(name, IFNAMSIZ-1, '%');
+       if (p) {
+               /*
+                * Verify the string, as this thing may have come from
+                * the user.  There must be exactly one "%d" and no other
+                * "%" characters.
+                */
+               if (p[1] != 'd' || strchr(p + 2, '%'))
+                       return -EINVAL;
+
+               /* Use one page as a bit array of possible slots */
+               inuse = (long *) get_zeroed_page(GFP_ATOMIC);
+               if (!inuse)
+                       return -ENOMEM;
+
+               for (d = dev_base; d; d = d->next) {
+                       if (!sscanf(d->name, name, &i))
+                               continue;
+                       if (i < 0 || i >= max_netdevices)
+                               continue;
+
+                       /* avoid cases where sscanf is not an exact inverse of printf */
+                       snprintf(buf, sizeof(buf), name, i);
+                       if (!strncmp(buf, d->name, IFNAMSIZ))
+                               set_bit(i, inuse);
+               }
+
+               i = find_first_zero_bit(inuse, max_netdevices);
+               free_page((unsigned long) inuse);
+       }
+
+       snprintf(buf, sizeof(buf), name, i);
+       if (!__dev_get_by_name(buf)) {
+               strlcpy(dev->name, buf, IFNAMSIZ);
+               return i;
+       }
+
+       /* It is possible to run out of possible slots
+        * when the name is long and there isn't enough space left
+        * for the digits, or if all bits are used.
+        */
+       return -ENFILE;
+}
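+
+/*
+ * Example (sketch): with eth0 and eth1 already registered,
+ *
+ *     dev_alloc_name(dev, "eth%d");
+ *
+ * writes "eth2" into dev->name and returns 2; a negative errno code
+ * is returned on failure.
+ */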
+
+
+/**
+ *     dev_change_name - change name of a device
+ *     @dev: device
+ *     @newname: name (or format string) must be at least IFNAMSIZ
+ *
+ *     Change the name of a device. A format string, e.g. "eth%d",
+ *     may be passed for wildcarding.
+ */
+int dev_change_name(struct net_device *dev, char *newname)
+{
+       int err = 0;
+
+       ASSERT_RTNL();
+
+       if (dev->flags & IFF_UP)
+               return -EBUSY;
+
+       if (!dev_valid_name(newname))
+               return -EINVAL;
+
+       if (strchr(newname, '%')) {
+               err = dev_alloc_name(dev, newname);
+               if (err < 0)
+                       return err;
+               strcpy(newname, dev->name);
+       }
+       else if (__dev_get_by_name(newname))
+               return -EEXIST;
+       else
+               strlcpy(dev->name, newname, IFNAMSIZ);
+
+       err = class_device_rename(&dev->class_dev, dev->name);
+       if (!err) {
+               hlist_del(&dev->name_hlist);
+               hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
+               notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
+       }
+
+       return err;
+}
+
+/**
+ *     netdev_state_change - device changes state
+ *     @dev: device to cause notification
+ *
+ *     Called to indicate a device has changed state. This function calls
+ *     the notifier chains for netdev_chain and sends a NEWLINK message
+ *     to the routing socket.
+ */
+void netdev_state_change(struct net_device *dev)
+{
+       if (dev->flags & IFF_UP) {
+               notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+               rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+       }
+}
+
+/**
+ *     dev_load        - load a network module
+ *     @name: name of interface
+ *
+ *     If a network interface is not present and the process has suitable
+ *     privileges, this function loads the module. If module loading is
+ *     not available in this kernel then it becomes a nop.
+ */
+
+void dev_load(const char *name)
+{
+       struct net_device *dev;  
+
+       read_lock(&dev_base_lock);
+       dev = __dev_get_by_name(name);
+       read_unlock(&dev_base_lock);
+
+       if (!dev && capable(CAP_SYS_MODULE))
+               request_module("%s", name);
+}
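+
+/*
+ * Example (sketch): dev_load("eth0") is a nop if eth0 already exists;
+ * otherwise, if the caller has CAP_SYS_MODULE, it requests a module
+ * named after the interface via request_module().
+ */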
+
+static int default_rebuild_header(struct sk_buff *skb)
+{
+       printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
+              skb->dev ? skb->dev->name : "NULL!!!");
+       kfree_skb(skb);
+       return 1;
+}
+
+
+/**
+ *     dev_open        - prepare an interface for use.
+ *     @dev:   device to open
+ *
+ *     Takes a device from down to up state. The device's private open
+ *     function is invoked and then the multicast lists are loaded. Finally
+ *     the device is moved into the up state and a %NETDEV_UP message is
+ *     sent to the netdev notifier chain.
+ *
+ *     Calling this function on an active interface is a nop. On a failure
+ *     a negative errno code is returned.
+ */
+int dev_open(struct net_device *dev)
+{
+       int ret = 0;
+
+       /*
+        *      Is it already up?
+        */
+
+       if (dev->flags & IFF_UP)
+               return 0;
+
+       /*
+        *      Is it even present?
+        */
+       if (!netif_device_present(dev))
+               return -ENODEV;
+
+       /*
+        *      Call device private open method
+        */
+       set_bit(__LINK_STATE_START, &dev->state);
+       if (dev->open) {
+               ret = dev->open(dev);
+               if (ret)
+                       clear_bit(__LINK_STATE_START, &dev->state);
+       }
+
+       /*
+        *      If it went open OK then:
+        */
+
+       if (!ret) {
+               /*
+                *      Set the flags.
+                */
+               dev->flags |= IFF_UP;
+
+               /*
+                *      Initialize multicasting status
+                */
+               dev_mc_upload(dev);
+
+               /*
+                *      Wakeup transmit queue engine
+                */
+               dev_activate(dev);
+
+               /*
+                *      ... and announce new interface.
+                */
+               notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+       }
+       return ret;
+}
+
+/**
+ *     dev_close - shutdown an interface.
+ *     @dev: device to shutdown
+ *
+ *     This function moves an active device into down state. A
+ *     %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ *     is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ *     chain.
+ */
+int dev_close(struct net_device *dev)
+{
+       if (!(dev->flags & IFF_UP))
+               return 0;
+
+       /*
+        *      Tell people we are going down, so that they can
+        *      prepare for the shutdown while the device is still operating.
+        */
+       notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
+
+       dev_deactivate(dev);
+
+       clear_bit(__LINK_STATE_START, &dev->state);
+
+       /* Synchronize to scheduled poll. We cannot touch the poll list;
+        * it may even be on a different cpu. So just clear netif_running(),
+        * and wait until the poll really happens. Actually, the best place
+        * for this is inside dev->stop() after the device has stopped its
+        * irq engine, but that requires more changes in devices. */
+
+       smp_mb__after_clear_bit(); /* Commit netif_running(). */
+       while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+               /* No hurry. */
+               current->state = TASK_INTERRUPTIBLE;
+               schedule_timeout(1);
+       }
+
+       /*
+        *      Call the device specific close. This cannot fail.
+        *      Only if device is UP
+        *
+        *      We allow it to be called even after a DETACH hot-plug
+        *      event.
+        */
+       if (dev->stop)
+               dev->stop(dev);
+
+       /*
+        *      Device is now down.
+        */
+
+       dev->flags &= ~IFF_UP;
+
+       /*
+        * Tell people we are down
+        */
+       notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+
+       return 0;
+}
+
+
+/*
+ *     Device change register/unregister. These are not inline or static
+ *     as we export them to the world.
+ */
+
+/**
+ *     register_netdevice_notifier - register a network notifier block
+ *     @nb: notifier
+ *
+ *     Register a notifier to be called when network device events occur.
+ *     The notifier passed is linked into the kernel structures and must
+ *     not be reused until it has been unregistered. A negative errno code
+ *     is returned on a failure.
+ *
+ *     When registered, all registration and up events are replayed
+ *     to the new notifier to allow it to have a race-free
+ *     view of the network device list.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+       struct net_device *dev;
+       int err;
+
+       rtnl_lock();
+       err = notifier_chain_register(&netdev_chain, nb);
+       if (!err) {
+               for (dev = dev_base; dev; dev = dev->next) {
+                       nb->notifier_call(nb, NETDEV_REGISTER, dev);
+
+                       if (dev->flags & IFF_UP) 
+                               nb->notifier_call(nb, NETDEV_UP, dev);
+               }
+       }
+       rtnl_unlock();
+       return err;
+}
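+
+/*
+ * Example usage (a sketch only; my_netdev_event and my_notifier are
+ * made-up names): a minimal notifier that logs devices coming up.
+ *
+ *     static int my_netdev_event(struct notifier_block *nb,
+ *                                unsigned long event, void *ptr)
+ *     {
+ *             struct net_device *dev = ptr;
+ *
+ *             if (event == NETDEV_UP)
+ *                     printk(KERN_INFO "%s is up\n", dev->name);
+ *             return NOTIFY_DONE;
+ *     }
+ *
+ *     static struct notifier_block my_notifier = {
+ *             .notifier_call = my_netdev_event,
+ *     };
+ *
+ *     register_netdevice_notifier(&my_notifier);
+ */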
+
+/**
+ *     unregister_netdevice_notifier - unregister a network notifier block
+ *     @nb: notifier
+ *
+ *     Unregister a notifier previously registered by
+ *     register_netdevice_notifier(). The notifier is unlinked from the
+ *     kernel structures and may then be reused. A negative errno code
+ *     is returned on a failure.
+ */
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+       return notifier_chain_unregister(&netdev_chain, nb);
+}
+
+/**
+ *     call_netdevice_notifiers - call all network notifier blocks
+ *      @val: value passed unmodified to notifier function
+ *      @v:   pointer passed unmodified to notifier function
+ *
+ *     Call all network notifier blocks.  Parameters and return value
+ *     are as for notifier_call_chain().
+ */
+
+int call_netdevice_notifiers(unsigned long val, void *v)
+{
+       return notifier_call_chain(&netdev_chain, val, v);
+}
+
+/* When > 0 there are consumers of rx skb time stamps */
+static atomic_t netstamp_needed = ATOMIC_INIT(0);
+
+void net_enable_timestamp(void)
+{
+       atomic_inc(&netstamp_needed);
+}
+
+void net_disable_timestamp(void)
+{
+       atomic_dec(&netstamp_needed);
+}
+
+static inline void net_timestamp(struct timeval *stamp)
+{
+       if (atomic_read(&netstamp_needed))
+               do_gettimeofday(stamp);
+       else {
+               stamp->tv_sec = 0;
+               stamp->tv_usec = 0;
+       }
+}
+
+/*
+ *     Support routine. Sends outgoing frames to any network
+ *     taps currently in use.
+ */
+
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct packet_type *ptype;
+       net_timestamp(&skb->stamp);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ptype, &ptype_all, list) {
+               /* Never send packets back to the socket
+                * they originated from - MvS (miquels@drinkel.ow.org)
+                */
+               if ((ptype->dev == dev || !ptype->dev) &&
+                   (ptype->af_packet_priv == NULL ||
+                    (struct sock *)ptype->af_packet_priv != skb->sk)) {
+                       struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+                       if (!skb2)
+                               break;
+
+                       /* skb->nh should be correctly
+                          set by the sender, so the check below is
+                          just protection against buggy protocols.
+                        */
+                       skb2->mac.raw = skb2->data;
+
+                       if (skb2->nh.raw < skb2->data ||
+                           skb2->nh.raw > skb2->tail) {
+                               if (net_ratelimit())
+                                       printk(KERN_CRIT "protocol %04x is "
+                                              "buggy, dev %s\n",
+                                              skb2->protocol, dev->name);
+                               skb2->nh.raw = skb2->data;
+                       }
+
+                       skb2->h.raw = skb2->nh.raw;
+                       skb2->pkt_type = PACKET_OUTGOING;
+                       ptype->func(skb2, skb->dev, ptype);
+               }
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * Invalidate hardware checksum when packet is to be mangled, and
+ * complete checksum manually on outgoing path.
+ */
+int skb_checksum_help(struct sk_buff *skb, int inward)
+{
+       unsigned int csum;
+       int ret = 0, offset = skb->h.raw - skb->data;
+
+       if (inward) {
+               skb->ip_summed = CHECKSUM_NONE;
+               goto out;
+       }
+
+       if (skb_cloned(skb)) {
+               ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               if (ret)
+                       goto out;
+       }
+
+       if (offset > (int)skb->len)
+               BUG();
+       csum = skb_checksum(skb, offset, skb->len-offset, 0);
+
+       offset = skb->tail - skb->h.raw;
+       if (offset <= 0)
+               BUG();
+       if (skb->csum + 2 > offset)
+               BUG();
+
+       *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+       skb->ip_summed = CHECKSUM_NONE;
+out:   
+       return ret;
+}
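+
+/*
+ * For reference: a CHECKSUM_HW skb keeps, in skb->csum, the offset of
+ * the checksum field from skb->h.raw, e.g. offsetof(struct tcphdr,
+ * check) for TCP, as set up in dev_queue_xmit() below.
+ */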
+
+#ifdef CONFIG_HIGHMEM
+/* Actually, we should eliminate this check as soon as we know that:
+ * 1. An IOMMU is present and allows mapping of all the memory.
+ * 2. No high memory really exists on this machine.
+ */
+
+static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+{
+       int i;
+
+       if (dev->features & NETIF_F_HIGHDMA)
+               return 0;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+               if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+                       return 1;
+
+       return 0;
+}
+#else
+#define illegal_highdma(dev, skb)      (0)
+#endif
+
+extern void skb_release_data(struct sk_buff *);
+
+/* Keep head the same: replace data */
+int __skb_linearize(struct sk_buff *skb, int gfp_mask)
+{
+       unsigned int size;
+       u8 *data;
+       long offset;
+       struct skb_shared_info *ninfo;
+       int headerlen = skb->data - skb->head;
+       int expand = (skb->tail + skb->data_len) - skb->end;
+
+       if (skb_shared(skb))
+               BUG();
+
+       if (expand <= 0)
+               expand = 0;
+
+       size = skb->end - skb->head + expand;
+       size = SKB_DATA_ALIGN(size);
+       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+       if (!data)
+               return -ENOMEM;
+
+       /* Copy entire thing */
+       if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
+               BUG();
+
+       /* Set up shinfo */
+       ninfo = (struct skb_shared_info*)(data + size);
+       atomic_set(&ninfo->dataref, 1);
+       ninfo->tso_size = skb_shinfo(skb)->tso_size;
+       ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
+       ninfo->nr_frags = 0;
+       ninfo->frag_list = NULL;
+
+       /* Offset between the two in bytes */
+       offset = data - skb->head;
+
+       /* Free old data. */
+       skb_release_data(skb);
+
+       skb->head = data;
+       skb->end  = data + size;
+
+       /* Set up new pointers */
+       skb->h.raw   += offset;
+       skb->nh.raw  += offset;
+       skb->mac.raw += offset;
+       skb->tail    += offset;
+       skb->data    += offset;
+
+       /* We are no longer a clone, even if we were. */
+       skb->cloned    = 0;
+
+       skb->tail     += skb->data_len;
+       skb->data_len  = 0;
+       return 0;
+}
+
+#define HARD_TX_LOCK(dev, cpu) {                       \
+       if ((dev->features & NETIF_F_LLTX) == 0) {      \
+               spin_lock(&dev->xmit_lock);             \
+               dev->xmit_lock_owner = cpu;             \
+       }                                               \
+}
+
+#define HARD_TX_UNLOCK(dev) {                          \
+       if ((dev->features & NETIF_F_LLTX) == 0) {      \
+               dev->xmit_lock_owner = -1;              \
+               spin_unlock(&dev->xmit_lock);           \
+       }                                               \
+}
+
+/**
+ *     dev_queue_xmit - transmit a buffer
+ *     @skb: buffer to transmit
+ *
+ *     Queue a buffer for transmission to a network device. The caller must
+ *     have set the device and priority and built the buffer before calling
+ *     this function. The function can be called from an interrupt.
+ *
+ *     A negative errno code is returned on a failure. A success does not
+ *     guarantee the frame will be transmitted as it may be dropped due
+ *     to congestion or traffic shaping.
+ */
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct Qdisc *q;
+       int rc = -ENOMEM;
+
+       if (skb_shinfo(skb)->frag_list &&
+           !(dev->features & NETIF_F_FRAGLIST) &&
+           __skb_linearize(skb, GFP_ATOMIC))
+               goto out_kfree_skb;
+
+       /* Fragmented skb is linearized if device does not support SG,
+        * or if at least one of fragments is in highmem and device
+        * does not support DMA from it.
+        */
+       if (skb_shinfo(skb)->nr_frags &&
+           (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
+           __skb_linearize(skb, GFP_ATOMIC))
+               goto out_kfree_skb;
+
+       /* If a checksum-deferred packet is forwarded to a device that needs a
+        * checksum, correct the pointers and force checksumming.
+        */
+       if (skb->proto_csum_blank) {
+               if (skb->protocol != htons(ETH_P_IP))
+                       goto out_kfree_skb;
+               skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
+               if (skb->h.raw >= skb->tail)
+                       goto out_kfree_skb;
+               switch (skb->nh.iph->protocol) {
+               case IPPROTO_TCP:
+                       skb->csum = offsetof(struct tcphdr, check);
+                       break;
+               case IPPROTO_UDP:
+                       skb->csum = offsetof(struct udphdr, check);
+                       break;
+               default:
+                       goto out_kfree_skb;
+               }
+               if ((skb->h.raw + skb->csum + 2) > skb->tail)
+                       goto out_kfree_skb;
+               skb->ip_summed = CHECKSUM_HW;
+       }
+
+       /* If packet is not checksummed and device does not support
+        * checksumming for this protocol, complete checksumming here.
+        */
+       if (skb->ip_summed == CHECKSUM_HW &&
+           (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
+            (!(dev->features & NETIF_F_IP_CSUM) ||
+             skb->protocol != htons(ETH_P_IP))))
+               if (skb_checksum_help(skb, 0))
+                       goto out_kfree_skb;
+
+       /* Disable soft irqs for various locks below. Also 
+        * stops preemption for RCU. 
+        */
+       local_bh_disable(); 
+
+       /* Updates of qdisc are serialized by queue_lock. 
+        * The struct Qdisc which is pointed to by qdisc is now a 
+        * rcu structure - it may be accessed without acquiring 
+        * a lock (but the structure may be stale.) The freeing of the
+        * qdisc will be deferred until it's known that there are no 
+        * more references to it.
+        * 
+        * If the qdisc has an enqueue function, we still need to 
+        * hold the queue_lock before calling it, since queue_lock
+        * also serializes access to the device queue.
+        */
+
+       q = rcu_dereference(dev->qdisc);
+#ifdef CONFIG_NET_CLS_ACT
+       skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
+#endif
+       if (q->enqueue) {
+               /* Grab device queue */
+               spin_lock(&dev->queue_lock);
+
+               rc = q->enqueue(skb, q);
+
+               qdisc_run(dev);
+
+               spin_unlock(&dev->queue_lock);
+               rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
+               goto out;
+       }
+
+       /* The device has no queue. Common case for software devices:
+          loopback, all sorts of tunnels...
+
+          Really, it is unlikely that xmit_lock protection is necessary
+          here.  (e.g. loopback and IP tunnels are clean, ignoring the
+          statistics counters.)
+          However, it is possible that they rely on the protection
+          made by us here.
+
+          Check this and take the lock. It is not prone to deadlocks.
+          Or use the noqueue qdisc; that is even simpler 8)
+        */
+       if (dev->flags & IFF_UP) {
+               int cpu = smp_processor_id(); /* ok because BHs are off */
+
+               if (dev->xmit_lock_owner != cpu) {
+
+                       HARD_TX_LOCK(dev, cpu);
+
+                       if (!netif_queue_stopped(dev)) {
+                               if (netdev_nit)
+                                       dev_queue_xmit_nit(skb, dev);
+
+                               rc = 0;
+                               if (!dev->hard_start_xmit(skb, dev)) {
+                                       HARD_TX_UNLOCK(dev);
+                                       goto out;
+                               }
+                       }
+                       HARD_TX_UNLOCK(dev);
+                       if (net_ratelimit())
+                               printk(KERN_CRIT "Virtual device %s asks to "
+                                      "queue packet!\n", dev->name);
+               } else {
+                       /* Recursion is detected! It is possible,
+                        * unfortunately */
+                       if (net_ratelimit())
+                               printk(KERN_CRIT "Dead loop on virtual device "
+                                      "%s, fix it urgently!\n", dev->name);
+               }
+       }
+
+       rc = -ENETDOWN;
+       local_bh_enable();
+
+out_kfree_skb:
+       kfree_skb(skb);
+       return rc;
+out:
+       local_bh_enable();
+       return rc;
+}
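+
+/*
+ * Example usage (a sketch only): transmit a prebuilt frame.  Note that
+ * the skb is always consumed; the error paths above free it.
+ *
+ *     skb->dev = dev;
+ *     rc = dev_queue_xmit(skb);
+ *     if (rc < 0)
+ *             ...     (the frame was dropped and already freed)
+ */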
+
+
+/*=======================================================================
+                       Receiver routines
+  =======================================================================*/
+
+int netdev_max_backlog = 300;
+int weight_p = 64;            /* old backlog weight */
+/* These numbers are selected based on intuition and some
+ * experimentation; if you have a more scientific way of doing this,
+ * please go ahead and fix things.
+ */
+int no_cong_thresh = 10;
+int no_cong = 20;
+int lo_cong = 100;
+int mod_cong = 290;
+
+DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
+
+
+static void get_sample_stats(int cpu)
+{
+#ifdef RAND_LIE
+       unsigned long rd;
+       int rq;
+#endif
+       struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+       int blog = sd->input_pkt_queue.qlen;
+       int avg_blog = sd->avg_blog;
+
+       avg_blog = (avg_blog >> 1) + (blog >> 1);
+
+       if (avg_blog > mod_cong) {
+               /* Above moderate congestion levels. */
+               sd->cng_level = NET_RX_CN_HIGH;
+#ifdef RAND_LIE
+               rd = net_random();
+               rq = rd % netdev_max_backlog;
+               if (rq < avg_blog) /* unlucky bastard */
+                       sd->cng_level = NET_RX_DROP;
+#endif
+       } else if (avg_blog > lo_cong) {
+               sd->cng_level = NET_RX_CN_MOD;
+#ifdef RAND_LIE
+               rd = net_random();
+               rq = rd % netdev_max_backlog;
+               if (rq < avg_blog) /* unlucky bastard */
+                       sd->cng_level = NET_RX_CN_HIGH;
+#endif
+       } else if (avg_blog > no_cong)
+               sd->cng_level = NET_RX_CN_LOW;
+       else  /* no congestion */
+               sd->cng_level = NET_RX_SUCCESS;
+
+       sd->avg_blog = avg_blog;
+}
+
+#ifdef OFFLINE_SAMPLE
+static void sample_queue(unsigned long dummy)
+{
+/* 10 ms or 1 ms -- I don't care -- JHS */
+       int next_tick = 1;
+       int cpu = smp_processor_id();
+
+       get_sample_stats(cpu);
+       next_tick += jiffies;
+       mod_timer(&samp_timer, next_tick);
+}
+#endif
+
+
+/**
+ *     netif_rx        -       post buffer to the network code
+ *     @skb: buffer to post
+ *
+ *     This function receives a packet from a device driver and queues it for
+ *     the upper (protocol) levels to process.  It always succeeds. The buffer
+ *     may be dropped during processing for congestion control or by the
+ *     protocol layers.
+ *
+ *     return values:
+ *     NET_RX_SUCCESS  (no congestion)
+ *     NET_RX_CN_LOW   (low congestion)
+ *     NET_RX_CN_MOD   (moderate congestion)
+ *     NET_RX_CN_HIGH  (high congestion)
+ *     NET_RX_DROP     (packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+       int this_cpu;
+       struct softnet_data *queue;
+       unsigned long flags;
+
+#ifdef CONFIG_NETPOLL
+       if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+#endif
+       
+       if (!skb->stamp.tv_sec)
+               net_timestamp(&skb->stamp);
+
+       /*
+        * The code is rearranged so that the path is shortest
+        * when the CPU is congested but still operating.
+        */
+       local_irq_save(flags);
+       this_cpu = smp_processor_id();
+       queue = &__get_cpu_var(softnet_data);
+
+       __get_cpu_var(netdev_rx_stat).total++;
+       if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
+               if (queue->input_pkt_queue.qlen) {
+                       if (queue->throttle)
+                               goto drop;
+
+enqueue:
+                       dev_hold(skb->dev);
+                       __skb_queue_tail(&queue->input_pkt_queue, skb);
+#ifndef OFFLINE_SAMPLE
+                       get_sample_stats(this_cpu);
+#endif
+                       local_irq_restore(flags);
+                       return queue->cng_level;
+               }
+
+               if (queue->throttle)
+                       queue->throttle = 0;
+
+               netif_rx_schedule(&queue->backlog_dev);
+               goto enqueue;
+       }
+
+       if (!queue->throttle) {
+               queue->throttle = 1;
+               __get_cpu_var(netdev_rx_stat).throttled++;
+       }
+
+drop:
+       __get_cpu_var(netdev_rx_stat).dropped++;
+       local_irq_restore(flags);
+
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
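+
+/*
+ * Example usage (a sketch only): the typical non-NAPI driver receive
+ * path, called from the driver's interrupt handler.
+ *
+ *     skb->dev = dev;
+ *     skb->protocol = eth_type_trans(skb, dev);
+ *     netif_rx(skb);
+ */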
+
+int netif_rx_ni(struct sk_buff *skb)
+{
+       int err;
+
+       preempt_disable();
+       err = netif_rx(skb);
+       if (local_softirq_pending())
+               do_softirq();
+       preempt_enable();
+
+       return err;
+}
+
+EXPORT_SYMBOL(netif_rx_ni);
+
+static __inline__ void skb_bond(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+
+       if (dev->master) {
+               skb->real_dev = skb->dev;
+               skb->dev = dev->master;
+       }
+}
+
+static void net_tx_action(struct softirq_action *h)
+{
+       struct softnet_data *sd = &__get_cpu_var(softnet_data);
+
+       if (sd->completion_queue) {
+               struct sk_buff *clist;
+
+               local_irq_disable();
+               clist = sd->completion_queue;
+               sd->completion_queue = NULL;
+               local_irq_enable();
+
+               while (clist) {
+                       struct sk_buff *skb = clist;
+                       clist = clist->next;
+
+                       BUG_TRAP(!atomic_read(&skb->users));
+                       __kfree_skb(skb);
+               }
+       }
+
+       if (sd->output_queue) {
+               struct net_device *head;
+
+               local_irq_disable();
+               head = sd->output_queue;
+               sd->output_queue = NULL;
+               local_irq_enable();
+
+               while (head) {
+                       struct net_device *dev = head;
+                       head = head->next_sched;
+
+                       smp_mb__before_clear_bit();
+                       clear_bit(__LINK_STATE_SCHED, &dev->state);
+
+                       if (spin_trylock(&dev->queue_lock)) {
+                               qdisc_run(dev);
+                               spin_unlock(&dev->queue_lock);
+                       } else {
+                               netif_schedule(dev);
+                       }
+               }
+       }
+}
+
+static __inline__ int deliver_skb(struct sk_buff *skb,
+                                 struct packet_type *pt_prev)
+{
+       atomic_inc(&skb->users);
+       return pt_prev->func(skb, skb->dev, pt_prev);
+}
+
+#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
+int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
+
+static __inline__ int handle_bridge(struct sk_buff **pskb,
+                                   struct packet_type **pt_prev, int *ret)
+{
+       struct net_bridge_port *port;
+
+       if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
+           (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
+               return 0;
+
+       if (*pt_prev) {
+               *ret = deliver_skb(*pskb, *pt_prev);
+               *pt_prev = NULL;
+       } 
+       
+       return br_handle_frame_hook(port, pskb);
+}
+#else
+#define handle_bridge(skb, pt_prev, ret)       (0)
+#endif
+
+#ifdef CONFIG_NET_CLS_ACT
+/* TODO: Maybe we should just force sch_ingress to be compiled in
+ * when CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for some
+ * useless instructions (a compare and two extra stores) when ingress
+ * is not configured but CONFIG_NET_CLS_ACT is.
+ * NOTE: This doesn't remove any functionality; if you don't have
+ * the ingress scheduler, you just can't add policies on ingress.
+ */
+static int ing_filter(struct sk_buff *skb) 
+{
+       struct Qdisc *q;
+       struct net_device *dev = skb->dev;
+       int result = TC_ACT_OK;
+       
+       if (dev->qdisc_ingress) {
+               __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
+               if (MAX_RED_LOOP < ttl++) {
+                       printk("Redir loop detected Dropping packet (%s->%s)\n",
+                               skb->input_dev?skb->input_dev->name:"??",skb->dev->name);
+                       return TC_ACT_SHOT;
+               }
+
+               skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
+
+               skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
+               if (NULL == skb->input_dev) {
+                       skb->input_dev = skb->dev;
+                       printk("ing_filter:  fixed  %s out %s\n",skb->input_dev->name,skb->dev->name);
+               }
+               spin_lock(&dev->ingress_lock);
+               if ((q = dev->qdisc_ingress) != NULL)
+                       result = q->enqueue(skb, q);
+               spin_unlock(&dev->ingress_lock);
+
+       }
+
+       return result;
+}
+#endif
+
+int netif_receive_skb(struct sk_buff *skb)
+{
+       struct packet_type *ptype, *pt_prev;
+       int ret = NET_RX_DROP;
+       unsigned short type;
+
+#ifdef CONFIG_NETPOLL
+       if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+#endif
+
+       if (!skb->stamp.tv_sec)
+               net_timestamp(&skb->stamp);
+
+       skb_bond(skb);
+
+       __get_cpu_var(netdev_rx_stat).total++;
+
+       skb->h.raw = skb->nh.raw = skb->data;
+       skb->mac_len = skb->nh.raw - skb->mac.raw;
+
+       pt_prev = NULL;
+
+       rcu_read_lock();
+
+#ifdef CONFIG_NET_CLS_ACT
+       if (skb->tc_verd & TC_NCLS) {
+               skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
+               goto ncls;
+       }
+#endif
+
+       switch (skb->ip_summed) {
+       case CHECKSUM_UNNECESSARY:
+               skb->proto_csum_valid = 1;
+               break;
+       case CHECKSUM_HW:
+               /* XXX Implement me. */
+       default:
+               skb->proto_csum_valid = 0;
+               break;
+       }
+
+       list_for_each_entry_rcu(ptype, &ptype_all, list) {
+               if (!ptype->dev || ptype->dev == skb->dev) {
+                       if (pt_prev) 
+                               ret = deliver_skb(skb, pt_prev);
+                       pt_prev = ptype;
+               }
+       }
+
+#ifdef CONFIG_NET_CLS_ACT
+       if (pt_prev) {
+               ret = deliver_skb(skb, pt_prev);
+               pt_prev = NULL; /* no one else should process this after us */
+       } else {
+               skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
+       }
+
+       ret = ing_filter(skb);
+
+       if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
+               kfree_skb(skb);
+               goto out;
+       }
+
+       skb->tc_verd = 0;
+ncls:
+#endif
+
+       handle_diverter(skb);
+
+       if (handle_bridge(&skb, &pt_prev, &ret))
+               goto out;
+
+       type = skb->protocol;
+       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
+               if (ptype->type == type &&
+                   (!ptype->dev || ptype->dev == skb->dev)) {
+                       if (pt_prev) 
+                               ret = deliver_skb(skb, pt_prev);
+                       pt_prev = ptype;
+               }
+       }
+
+       if (pt_prev) {
+               ret = pt_prev->func(skb, skb->dev, pt_prev);
+       } else {
+               kfree_skb(skb);
+               /* Jamal, now you will not be able to escape explaining
+                * to me how you were going to use this. :-)
+                */
+               ret = NET_RX_DROP;
+       }
+
+out:
+       rcu_read_unlock();
+       return ret;
+}
+
+static int process_backlog(struct net_device *backlog_dev, int *budget)
+{
+       int work = 0;
+       int quota = min(backlog_dev->quota, *budget);
+       struct softnet_data *queue = &__get_cpu_var(softnet_data);
+       unsigned long start_time = jiffies;
+
+       for (;;) {
+               struct sk_buff *skb;
+               struct net_device *dev;
+
+               local_irq_disable();
+               skb = __skb_dequeue(&queue->input_pkt_queue);
+               if (!skb)
+                       goto job_done;
+               local_irq_enable();
+
+               dev = skb->dev;
+
+               netif_receive_skb(skb);
+
+               dev_put(dev);
+
+               work++;
+
+               if (work >= quota || jiffies - start_time > 1)
+                       break;
+
+       }
+
+       backlog_dev->quota -= work;
+       *budget -= work;
+       return -1;
+
+job_done:
+       backlog_dev->quota -= work;
+       *budget -= work;
+
+       list_del(&backlog_dev->poll_list);
+       smp_mb__before_clear_bit();
+       netif_poll_enable(backlog_dev);
+
+       if (queue->throttle)
+               queue->throttle = 0;
+       local_irq_enable();
+       return 0;
+}
+
+static void net_rx_action(struct softirq_action *h)
+{
+       struct softnet_data *queue = &__get_cpu_var(softnet_data);
+       unsigned long start_time = jiffies;
+       int budget = netdev_max_backlog;
+
+       
+       local_irq_disable();
+
+       while (!list_empty(&queue->poll_list)) {
+               struct net_device *dev;
+
+               if (budget <= 0 || jiffies - start_time > 1)
+                       goto softnet_break;
+
+               local_irq_enable();
+
+               dev = list_entry(queue->poll_list.next,
+                                struct net_device, poll_list);
+
+               if (dev->quota <= 0 || dev->poll(dev, &budget)) {
+                       local_irq_disable();
+                       list_del(&dev->poll_list);
+                       list_add_tail(&dev->poll_list, &queue->poll_list);
+                       if (dev->quota < 0)
+                               dev->quota += dev->weight;
+                       else
+                               dev->quota = dev->weight;
+               } else {
+                       dev_put(dev);
+                       local_irq_disable();
+               }
+       }
+out:
+       local_irq_enable();
+       return;
+
+softnet_break:
+       __get_cpu_var(netdev_rx_stat).time_squeeze++;
+       __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+       goto out;
+}
+
+static gifconf_func_t * gifconf_list [NPROTO];
+
+/**
+ *     register_gifconf        -       register a SIOCGIF handler
+ *     @family: Address family
+ *     @gifconf: Function handler
+ *
+ *     Register protocol dependent address dumping routines. The handler
+ *     that is passed must not be freed or reused until it has been replaced
+ *     by another handler.
+ */
+int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
+{
+       if (family >= NPROTO)
+               return -EINVAL;
+       gifconf_list[family] = gifconf;
+       return 0;
+}
+
+
+/*
+ *     Map an interface index to its name (SIOCGIFNAME)
+ */
+
+/*
+ *     We need this ioctl for efficient implementation of the
+ *     if_indextoname() function required by the IPv6 API.  Without
+ *     it, we would have to search all the interfaces to find a
+ *     match.  --pb
+ */
+
+static int dev_ifname(struct ifreq __user *arg)
+{
+       struct net_device *dev;
+       struct ifreq ifr;
+
+       /*
+        *      Fetch the caller's info block.
+        */
+
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
+
+       read_lock(&dev_base_lock);
+       dev = __dev_get_by_index(ifr.ifr_ifindex);
+       if (!dev) {
+               read_unlock(&dev_base_lock);
+               return -ENODEV;
+       }
+
+       strcpy(ifr.ifr_name, dev->name);
+       read_unlock(&dev_base_lock);
+
+       if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+               return -EFAULT;
+       return 0;
+}
+
+/*
+ *     Perform a SIOCGIFCONF call. This structure will change
+ *     size eventually, and there is nothing I can do about it.
+ *     Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char __user *arg)
+{
+       struct ifconf ifc;
+       struct net_device *dev;
+       char __user *pos;
+       int len;
+       int total;
+       int i;
+
+       /*
+        *      Fetch the caller's info block.
+        */
+
+       if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
+               return -EFAULT;
+
+       pos = ifc.ifc_buf;
+       len = ifc.ifc_len;
+
+       /*
+        *      Loop over the interfaces, and write an info block for each.
+        */
+
+       total = 0;
+       for (dev = dev_base; dev; dev = dev->next) {
+               for (i = 0; i < NPROTO; i++) {
+                       if (gifconf_list[i]) {
+                               int done;
+                               if (!pos)
+                                       done = gifconf_list[i](dev, NULL, 0);
+                               else
+                                       done = gifconf_list[i](dev, pos + total,
+                                                              len - total);
+                               if (done < 0)
+                                       return -EFAULT;
+                               total += done;
+                       }
+               }
+       }
+
+       /*
+        *      All done.  Write the updated control block back to the caller.
+        */
+       ifc.ifc_len = total;
+
+       /*
+        *      Both BSD and Solaris return 0 here, so we do too.
+        */
+       return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ *     This is invoked by the /proc filesystem handler to display a device
+ *     in detail.
+ */
+static __inline__ struct net_device *dev_get_idx(loff_t pos)
+{
+       struct net_device *dev;
+       loff_t i;
+
+       for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
+
+       return i == pos ? dev : NULL;
+}
+
+void *dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       read_lock(&dev_base_lock);
+       return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
+}
+
+void dev_seq_stop(struct seq_file *seq, void *v)
+{
+       read_unlock(&dev_base_lock);
+}
+
+static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
+{
+       if (dev->get_stats) {
+               struct net_device_stats *stats = dev->get_stats(dev);
+
+               seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+                               "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+                          dev->name, stats->rx_bytes, stats->rx_packets,
+                          stats->rx_errors,
+                          stats->rx_dropped + stats->rx_missed_errors,
+                          stats->rx_fifo_errors,
+                          stats->rx_length_errors + stats->rx_over_errors +
+                            stats->rx_crc_errors + stats->rx_frame_errors,
+                          stats->rx_compressed, stats->multicast,
+                          stats->tx_bytes, stats->tx_packets,
+                          stats->tx_errors, stats->tx_dropped,
+                          stats->tx_fifo_errors, stats->collisions,
+                          stats->tx_carrier_errors +
+                            stats->tx_aborted_errors +
+                            stats->tx_window_errors +
+                            stats->tx_heartbeat_errors,
+                          stats->tx_compressed);
+       } else
+               seq_printf(seq, "%6s: No statistics available.\n", dev->name);
+}
+
+/*
+ *     Called from the PROCfs module. This now uses the new arbitrary sized
+ *     /proc/net interface to create /proc/net/dev
+ */
+static int dev_seq_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Inter-|   Receive                            "
+                             "                    |  Transmit\n"
+                             " face |bytes    packets errs drop fifo frame "
+                             "compressed multicast|bytes    packets errs "
+                             "drop fifo colls carrier compressed\n");
+       else
+               dev_seq_printf_stats(seq, v);
+       return 0;
+}
+
+static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+{
+       struct netif_rx_stats *rc = NULL;
+
+       while (*pos < NR_CPUS)
+               if (cpu_online(*pos)) {
+                       rc = &per_cpu(netdev_rx_stat, *pos);
+                       break;
+               } else
+                       ++*pos;
+       return rc;
+}
+
+static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       return softnet_get_online(pos);
+}
+
+static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return softnet_get_online(pos);
+}
+
+static void softnet_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int softnet_seq_show(struct seq_file *seq, void *v)
+{
+       struct netif_rx_stats *s = v;
+
+       seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+                  s->total, s->dropped, s->time_squeeze, s->throttled,
+                  s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
+                  s->fastroute_deferred_out,
+#if 0
+                  s->fastroute_latency_reduction
+#else
+                  s->cpu_collision
+#endif
+                 );
+       return 0;
+}
+
+static struct seq_operations dev_seq_ops = {
+       .start = dev_seq_start,
+       .next  = dev_seq_next,
+       .stop  = dev_seq_stop,
+       .show  = dev_seq_show,
+};
+
+static int dev_seq_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &dev_seq_ops);
+}
+
+static struct file_operations dev_seq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = dev_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+static struct seq_operations softnet_seq_ops = {
+       .start = softnet_seq_start,
+       .next  = softnet_seq_next,
+       .stop  = softnet_seq_stop,
+       .show  = softnet_seq_show,
+};
+
+static int softnet_seq_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &softnet_seq_ops);
+}
+
+static struct file_operations softnet_seq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = softnet_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+#ifdef WIRELESS_EXT
+extern int wireless_proc_init(void);
+#else
+#define wireless_proc_init() 0
+#endif
+
+static int __init dev_proc_init(void)
+{
+       int rc = -ENOMEM;
+
+       if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
+               goto out;
+       if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
+               goto out_dev;
+       if (wireless_proc_init())
+               goto out_softnet;
+       rc = 0;
+out:
+       return rc;
+out_softnet:
+       proc_net_remove("softnet_stat");
+out_dev:
+       proc_net_remove("dev");
+       goto out;
+}
+#else
+#define dev_proc_init() 0
+#endif /* CONFIG_PROC_FS */
+
+
+/**
+ *     netdev_set_master       -       set up master/slave pair
+ *     @slave: slave device
+ *     @master: new master device
+ *
+ *     Changes the master device of the slave. Pass %NULL to break the
+ *     bonding. The caller must hold the RTNL semaphore. On a failure
+ *     a negative errno code is returned. On success the reference counts
+ *     are adjusted, %RTM_NEWLINK is sent to the routing socket and the
+ *     function returns zero.
+ */
+int netdev_set_master(struct net_device *slave, struct net_device *master)
+{
+       struct net_device *old = slave->master;
+
+       ASSERT_RTNL();
+
+       if (master) {
+               if (old)
+                       return -EBUSY;
+               dev_hold(master);
+       }
+
+       slave->master = master;
+       
+       synchronize_net();
+
+       if (old)
+               dev_put(old);
+
+       if (master)
+               slave->flags |= IFF_SLAVE;
+       else
+               slave->flags &= ~IFF_SLAVE;
+
+       rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
+       return 0;
+}
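+
+/*
+ *     Usage sketch (illustrative only; "bond_dev" and "slave_dev" are
+ *     hypothetical names): a bonding-style driver enslaves a device
+ *     while holding the RTNL semaphore, and breaks the pairing by
+ *     passing %NULL as the master:
+ *
+ *             rtnl_lock();
+ *             err = netdev_set_master(slave_dev, bond_dev);
+ *             ...
+ *             netdev_set_master(slave_dev, NULL);
+ *             rtnl_unlock();
+ */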
+
+/**
+ *     dev_set_promiscuity     - update promiscuity count on a device
+ *     @dev: device
+ *     @inc: modifier
+ *
+ *     Add or remove promiscuity from a device. While the count in the device
+ *     remains above zero the interface remains promiscuous. Once it hits zero
+ *     the device reverts back to normal filtering operation. A negative inc
+ *     value is used to drop promiscuity on the device.
+ */
+void dev_set_promiscuity(struct net_device *dev, int inc)
+{
+       unsigned short old_flags = dev->flags;
+
+       dev->flags |= IFF_PROMISC;
+       if ((dev->promiscuity += inc) == 0)
+               dev->flags &= ~IFF_PROMISC;
+       if (dev->flags ^ old_flags) {
+               dev_mc_upload(dev);
+               printk(KERN_INFO "device %s %s promiscuous mode\n",
+                      dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
+                                                              "left");
+       }
+}
+
+/**
+ *     dev_set_allmulti        - update allmulti count on a device
+ *     @dev: device
+ *     @inc: modifier
+ *
+ *     Add or remove reception of all multicast frames on a device. While the
+ *     count in the device remains above zero the interface stays in
+ *     all-multicast mode. Once it hits zero the device reverts back to normal
+ *     filtering operation. A negative @inc value is used to drop the counter
+ *     when releasing a resource needing all multicasts.
+ */
+
+void dev_set_allmulti(struct net_device *dev, int inc)
+{
+       unsigned short old_flags = dev->flags;
+
+       dev->flags |= IFF_ALLMULTI;
+       if ((dev->allmulti += inc) == 0)
+               dev->flags &= ~IFF_ALLMULTI;
+       if (dev->flags ^ old_flags)
+               dev_mc_upload(dev);
+}
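+
+/*
+ *     Usage sketch (illustrative; "dev" is assumed to be a held device
+ *     reference): a capture-style user bumps both counters while
+ *     sniffing and drops them with negative increments afterwards,
+ *     normally under the RTNL semaphore:
+ *
+ *             dev_set_promiscuity(dev, 1);
+ *             dev_set_allmulti(dev, 1);
+ *             ...
+ *             dev_set_promiscuity(dev, -1);
+ *             dev_set_allmulti(dev, -1);
+ */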
+
+unsigned dev_get_flags(const struct net_device *dev)
+{
+       unsigned flags;
+
+       flags = (dev->flags & ~(IFF_PROMISC |
+                               IFF_ALLMULTI |
+                               IFF_RUNNING)) | 
+               (dev->gflags & (IFF_PROMISC |
+                               IFF_ALLMULTI));
+
+       if (netif_running(dev) && netif_carrier_ok(dev))
+               flags |= IFF_RUNNING;
+
+       return flags;
+}
+
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+       int ret;
+       int old_flags = dev->flags;
+
+       /*
+        *      Set the flags on our device.
+        */
+
+       dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
+                              IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
+                              IFF_AUTOMEDIA)) |
+                    (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
+                                   IFF_ALLMULTI));
+
+       /*
+        *      Load in the correct multicast list now the flags have changed.
+        */
+
+       dev_mc_upload(dev);
+
+       /*
+        *      Has the interface gone up or down? We handle IFF_UP ourselves
+        *      according to user attempts to set it, rather than blindly
+        *      setting it.
+        */
+
+       ret = 0;
+       if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
+               ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+
+               if (!ret)
+                       dev_mc_upload(dev);
+       }
+
+       if (dev->flags & IFF_UP &&
+           ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+                                         IFF_VOLATILE)))
+               notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+
+       if ((flags ^ dev->gflags) & IFF_PROMISC) {
+               int inc = (flags & IFF_PROMISC) ? +1 : -1;
+               dev->gflags ^= IFF_PROMISC;
+               dev_set_promiscuity(dev, inc);
+       }
+
+       /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
+          is important. Some (broken) drivers set IFF_PROMISC when
+          IFF_ALLMULTI is requested, without asking us and without reporting.
+        */
+       if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
+               int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
+               dev->gflags ^= IFF_ALLMULTI;
+               dev_set_allmulti(dev, inc);
+       }
+
+       if (old_flags ^ dev->flags)
+               rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
+
+       return ret;
+}
+
+int dev_set_mtu(struct net_device *dev, int new_mtu)
+{
+       int err;
+
+       if (new_mtu == dev->mtu)
+               return 0;
+
+       /*      MTU must be positive.    */
+       if (new_mtu < 0)
+               return -EINVAL;
+
+       if (!netif_device_present(dev))
+               return -ENODEV;
+
+       err = 0;
+       if (dev->change_mtu)
+               err = dev->change_mtu(dev, new_mtu);
+       else
+               dev->mtu = new_mtu;
+       if (!err && dev->flags & IFF_UP)
+               notifier_call_chain(&netdev_chain,
+                                   NETDEV_CHANGEMTU, dev);
+       return err;
+}
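+
+/*
+ *     Usage sketch (illustrative): these two helpers are what the
+ *     SIOCSIFFLAGS/SIOCSIFMTU ioctls boil down to; in-kernel callers
+ *     take the RTNL semaphore themselves, e.g. to bring an interface
+ *     up and set its MTU:
+ *
+ *             rtnl_lock();
+ *             err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
+ *             if (!err)
+ *                     err = dev_set_mtu(dev, 1500);
+ *             rtnl_unlock();
+ */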
+
+
+/*
+ *     Perform the SIOCxIFxxx calls.
+ */
+static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
+{
+       int err;
+       struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+
+       if (!dev)
+               return -ENODEV;
+
+       switch (cmd) {
+               case SIOCGIFFLAGS:      /* Get interface flags */
+                       ifr->ifr_flags = dev_get_flags(dev);
+                       return 0;
+
+               case SIOCSIFFLAGS:      /* Set interface flags */
+                       return dev_change_flags(dev, ifr->ifr_flags);
+
+               case SIOCGIFMETRIC:     /* Get the metric on the interface
+                                          (currently unused) */
+                       ifr->ifr_metric = 0;
+                       return 0;
+
+               case SIOCSIFMETRIC:     /* Set the metric on the interface
+                                          (currently unused) */
+                       return -EOPNOTSUPP;
+
+               case SIOCGIFMTU:        /* Get the MTU of a device */
+                       ifr->ifr_mtu = dev->mtu;
+                       return 0;
+
+               case SIOCSIFMTU:        /* Set the MTU of a device */
+                       return dev_set_mtu(dev, ifr->ifr_mtu);
+
+               case SIOCGIFHWADDR:
+                       if (!dev->addr_len)
+                               memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+                       else
+                               memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+                                      min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+                       ifr->ifr_hwaddr.sa_family = dev->type;
+                       return 0;
+
+               case SIOCSIFHWADDR:
+                       if (!dev->set_mac_address)
+                               return -EOPNOTSUPP;
+                       if (ifr->ifr_hwaddr.sa_family != dev->type)
+                               return -EINVAL;
+                       if (!netif_device_present(dev))
+                               return -ENODEV;
+                       err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
+                       if (!err)
+                               notifier_call_chain(&netdev_chain,
+                                                   NETDEV_CHANGEADDR, dev);
+                       return err;
+
+               case SIOCSIFHWBROADCAST:
+                       if (ifr->ifr_hwaddr.sa_family != dev->type)
+                               return -EINVAL;
+                       memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+                              min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+                       notifier_call_chain(&netdev_chain,
+                                           NETDEV_CHANGEADDR, dev);
+                       return 0;
+
+               case SIOCGIFMAP:
+                       ifr->ifr_map.mem_start = dev->mem_start;
+                       ifr->ifr_map.mem_end   = dev->mem_end;
+                       ifr->ifr_map.base_addr = dev->base_addr;
+                       ifr->ifr_map.irq       = dev->irq;
+                       ifr->ifr_map.dma       = dev->dma;
+                       ifr->ifr_map.port      = dev->if_port;
+                       return 0;
+
+               case SIOCSIFMAP:
+                       if (dev->set_config) {
+                               if (!netif_device_present(dev))
+                                       return -ENODEV;
+                               return dev->set_config(dev, &ifr->ifr_map);
+                       }
+                       return -EOPNOTSUPP;
+
+               case SIOCADDMULTI:
+                       if (!dev->set_multicast_list ||
+                           ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+                               return -EINVAL;
+                       if (!netif_device_present(dev))
+                               return -ENODEV;
+                       return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
+                                         dev->addr_len, 1);
+
+               case SIOCDELMULTI:
+                       if (!dev->set_multicast_list ||
+                           ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+                               return -EINVAL;
+                       if (!netif_device_present(dev))
+                               return -ENODEV;
+                       return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
+                                            dev->addr_len, 1);
+
+               case SIOCGIFINDEX:
+                       ifr->ifr_ifindex = dev->ifindex;
+                       return 0;
+
+               case SIOCGIFTXQLEN:
+                       ifr->ifr_qlen = dev->tx_queue_len;
+                       return 0;
+
+               case SIOCSIFTXQLEN:
+                       if (ifr->ifr_qlen < 0)
+                               return -EINVAL;
+                       dev->tx_queue_len = ifr->ifr_qlen;
+                       return 0;
+
+               case SIOCSIFNAME:
+                       ifr->ifr_newname[IFNAMSIZ-1] = '\0';
+                       return dev_change_name(dev, ifr->ifr_newname);
+
+               /*
+                *      Unknown or private ioctl
+                */
+
+               default:
+                       if ((cmd >= SIOCDEVPRIVATE &&
+                           cmd <= SIOCDEVPRIVATE + 15) ||
+                           cmd == SIOCBONDENSLAVE ||
+                           cmd == SIOCBONDRELEASE ||
+                           cmd == SIOCBONDSETHWADDR ||
+                           cmd == SIOCBONDSLAVEINFOQUERY ||
+                           cmd == SIOCBONDINFOQUERY ||
+                           cmd == SIOCBONDCHANGEACTIVE ||
+                           cmd == SIOCGMIIPHY ||
+                           cmd == SIOCGMIIREG ||
+                           cmd == SIOCSMIIREG ||
+                           cmd == SIOCBRADDIF ||
+                           cmd == SIOCBRDELIF ||
+                           cmd == SIOCWANDEV) {
+                               err = -EOPNOTSUPP;
+                               if (dev->do_ioctl) {
+                                       if (netif_device_present(dev))
+                                               err = dev->do_ioctl(dev, ifr,
+                                                                   cmd);
+                                       else
+                                               err = -ENODEV;
+                               }
+                       } else
+                               err = -EINVAL;
+
+       }
+       return err;
+}
+
+/*
+ *     This function handles all "interface"-type I/O control requests. The actual
+ *     'doing' part of this is dev_ifsioc above.
+ */
+
+/**
+ *     dev_ioctl       -       network device ioctl
+ *     @cmd: command to issue
+ *     @arg: pointer to a struct ifreq in user space
+ *
+ *     Issue ioctl functions to devices. This is normally called by the
+ *     user space syscall interfaces but can sometimes be useful for
+ *     other purposes. The return value is the return from the syscall if
+ *     positive or a negative errno code on error.
+ */
+
+int dev_ioctl(unsigned int cmd, void __user *arg)
+{
+       struct ifreq ifr;
+       int ret;
+       char *colon;
+
+       /* One special case: SIOCGIFCONF takes ifconf argument
+          and requires shared lock, because it sleeps writing
+          to user space.
+        */
+
+       if (cmd == SIOCGIFCONF) {
+               rtnl_shlock();
+               ret = dev_ifconf((char __user *) arg);
+               rtnl_shunlock();
+               return ret;
+       }
+       if (cmd == SIOCGIFNAME)
+               return dev_ifname((struct ifreq __user *)arg);
+
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
+
+       ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+       colon = strchr(ifr.ifr_name, ':');
+       if (colon)
+               *colon = 0;
+
+       /*
+        *      See which interface the caller is talking about.
+        */
+
+       switch (cmd) {
+               /*
+                *      These ioctl calls:
+                *      - can be done by all.
+                *      - atomic and do not require locking.
+                *      - return a value
+                */
+               case SIOCGIFFLAGS:
+               case SIOCGIFMETRIC:
+               case SIOCGIFMTU:
+               case SIOCGIFHWADDR:
+               case SIOCGIFSLAVE:
+               case SIOCGIFMAP:
+               case SIOCGIFINDEX:
+               case SIOCGIFTXQLEN:
+                       dev_load(ifr.ifr_name);
+                       read_lock(&dev_base_lock);
+                       ret = dev_ifsioc(&ifr, cmd);
+                       read_unlock(&dev_base_lock);
+                       if (!ret) {
+                               if (colon)
+                                       *colon = ':';
+                               if (copy_to_user(arg, &ifr,
+                                                sizeof(struct ifreq)))
+                                       ret = -EFAULT;
+                       }
+                       return ret;
+
+               case SIOCETHTOOL:
+                       dev_load(ifr.ifr_name);
+                       rtnl_lock();
+                       ret = dev_ethtool(&ifr);
+                       rtnl_unlock();
+                       if (!ret) {
+                               if (colon)
+                                       *colon = ':';
+                               if (copy_to_user(arg, &ifr,
+                                                sizeof(struct ifreq)))
+                                       ret = -EFAULT;
+                       }
+                       return ret;
+
+               /*
+                *      These ioctl calls:
+                *      - require superuser power.
+                *      - require strict serialization.
+                *      - return a value
+                */
+               case SIOCGMIIPHY:
+               case SIOCGMIIREG:
+               case SIOCSIFNAME:
+                       if (!capable(CAP_NET_ADMIN))
+                               return -EPERM;
+                       dev_load(ifr.ifr_name);
+                       rtnl_lock();
+                       ret = dev_ifsioc(&ifr, cmd);
+                       rtnl_unlock();
+                       if (!ret) {
+                               if (colon)
+                                       *colon = ':';
+                               if (copy_to_user(arg, &ifr,
+                                                sizeof(struct ifreq)))
+                                       ret = -EFAULT;
+                       }
+                       return ret;
+
+               /*
+                *      These ioctl calls:
+                *      - require superuser power.
+                *      - require strict serialization.
+                *      - do not return a value
+                */
+               case SIOCSIFFLAGS:
+               case SIOCSIFMETRIC:
+               case SIOCSIFMTU:
+               case SIOCSIFMAP:
+               case SIOCSIFHWADDR:
+               case SIOCSIFSLAVE:
+               case SIOCADDMULTI:
+               case SIOCDELMULTI:
+               case SIOCSIFHWBROADCAST:
+               case SIOCSIFTXQLEN:
+               case SIOCSMIIREG:
+               case SIOCBONDENSLAVE:
+               case SIOCBONDRELEASE:
+               case SIOCBONDSETHWADDR:
+               case SIOCBONDSLAVEINFOQUERY:
+               case SIOCBONDINFOQUERY:
+               case SIOCBONDCHANGEACTIVE:
+               case SIOCBRADDIF:
+               case SIOCBRDELIF:
+                       if (!capable(CAP_NET_ADMIN))
+                               return -EPERM;
+                       dev_load(ifr.ifr_name);
+                       rtnl_lock();
+                       ret = dev_ifsioc(&ifr, cmd);
+                       rtnl_unlock();
+                       return ret;
+
+               case SIOCGIFMEM:
+                       /* Get the per device memory space. We can add this but
+                        * currently do not support it */
+               case SIOCSIFMEM:
+                       /* Set the per device memory buffer space.
+                        * Not applicable in our case */
+               case SIOCSIFLINK:
+                       return -EINVAL;
+
+               /*
+                *      Unknown or private ioctl.
+                */
+               default:
+                       if (cmd == SIOCWANDEV ||
+                           (cmd >= SIOCDEVPRIVATE &&
+                            cmd <= SIOCDEVPRIVATE + 15)) {
+                               dev_load(ifr.ifr_name);
+                               rtnl_lock();
+                               ret = dev_ifsioc(&ifr, cmd);
+                               rtnl_unlock();
+                               if (!ret && copy_to_user(arg, &ifr,
+                                                        sizeof(struct ifreq)))
+                                       ret = -EFAULT;
+                               return ret;
+                       }
+#ifdef WIRELESS_EXT
+                       /* Take care of Wireless Extensions */
+                       if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+                               /* If command is `set a parameter', or
+                                * `get the encoding parameters', check if
+                                * the user has the right to do it */
+                               if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
+                                       if (!capable(CAP_NET_ADMIN))
+                                               return -EPERM;
+                               }
+                               dev_load(ifr.ifr_name);
+                               rtnl_lock();
+                               /* Follow me in net/core/wireless.c */
+                               ret = wireless_process_ioctl(&ifr, cmd);
+                               rtnl_unlock();
+                               if (IW_IS_GET(cmd) &&
+                                   copy_to_user(arg, &ifr,
+                                                sizeof(struct ifreq)))
+                                       ret = -EFAULT;
+                               return ret;
+                       }
+#endif /* WIRELESS_EXT */
+                       return -EINVAL;
+       }
+}
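+
+/*
+ *     Userspace view (illustrative sketch; "eth0" is just an example
+ *     interface name): these are the classic ifreq ioctls, reachable
+ *     through any socket file descriptor:
+ *
+ *             struct ifreq ifr;
+ *             int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *             strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *             if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
+ *                     printf("mtu %d\n", ifr.ifr_mtu);
+ */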
+
+
+/**
+ *     dev_new_index   -       allocate an ifindex
+ *
+ *     Returns a suitable unique value for a new device interface
+ *     number.  The caller must hold the rtnl semaphore or the
+ *     dev_base_lock to be sure it remains unique.
+ */
+static int dev_new_index(void)
+{
+       static int ifindex;
+       for (;;) {
+               if (++ifindex <= 0)
+                       ifindex = 1;
+               if (!__dev_get_by_index(ifindex))
+                       return ifindex;
+       }
+}
+
+static int dev_boot_phase = 1;
+
+/* Delayed registration/unregistration */
+static DEFINE_SPINLOCK(net_todo_list_lock);
+static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
+
+static inline void net_set_todo(struct net_device *dev)
+{
+       spin_lock(&net_todo_list_lock);
+       list_add_tail(&dev->todo_list, &net_todo_list);
+       spin_unlock(&net_todo_list_lock);
+}
+
+/**
+ *     register_netdevice      - register a network device
+ *     @dev: device to register
+ *
+ *     Take a completed network device structure and add it to the kernel
+ *     interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
+ *     chain. 0 is returned on success. A negative errno code is returned
+ *     on a failure to set up the device, or if the name is a duplicate.
+ *
+ *     Callers must hold the rtnl semaphore. You may want
+ *     register_netdev() instead of this.
+ *
+ *     BUGS:
+ *     The locking appears insufficient to guarantee two parallel registers
+ *     will not get the same name.
+ */
+
+int register_netdevice(struct net_device *dev)
+{
+       struct hlist_head *head;
+       struct hlist_node *p;
+       int ret;
+
+       BUG_ON(dev_boot_phase);
+       ASSERT_RTNL();
+
+       /* When net_devices are persistent, this will be fatal. */
+       BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
+
+       spin_lock_init(&dev->queue_lock);
+       spin_lock_init(&dev->xmit_lock);
+       dev->xmit_lock_owner = -1;
+#ifdef CONFIG_NET_CLS_ACT
+       spin_lock_init(&dev->ingress_lock);
+#endif
+
+       ret = alloc_divert_blk(dev);
+       if (ret)
+               goto out;
+
+       dev->iflink = -1;
+
+       /* Init, if this function is available */
+       if (dev->init) {
+               ret = dev->init(dev);
+               if (ret) {
+                       if (ret > 0)
+                               ret = -EIO;
+                       goto out_err;
+               }
+       }
+       if (!dev_valid_name(dev->name)) {
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       dev->ifindex = dev_new_index();
+       if (dev->iflink == -1)
+               dev->iflink = dev->ifindex;
+
+       /* Check for existence of name */
+       head = dev_name_hash(dev->name);
+       hlist_for_each(p, head) {
+               struct net_device *d
+                       = hlist_entry(p, struct net_device, name_hlist);
+               if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
+                       ret = -EEXIST;
+                       goto out_err;
+               }
+       }
+
+       /* Fix illegal SG+CSUM combinations. */
+       if ((dev->features & NETIF_F_SG) &&
+           !(dev->features & (NETIF_F_IP_CSUM |
+                              NETIF_F_NO_CSUM |
+                              NETIF_F_HW_CSUM))) {
+               printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+                      dev->name);
+               dev->features &= ~NETIF_F_SG;
+       }
+
+       /* TSO requires that SG is present as well. */
+       if ((dev->features & NETIF_F_TSO) &&
+           !(dev->features & NETIF_F_SG)) {
+               printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+                      dev->name);
+               dev->features &= ~NETIF_F_TSO;
+       }
+
+       /*
+        *      Install a nil rebuild_header routine; it should never be
+        *      called and serves only as a bug trap.
+        */
+
+       if (!dev->rebuild_header)
+               dev->rebuild_header = default_rebuild_header;
+
+       /*
+        *      Default initial state at registry is that the
+        *      device is present.
+        */
+
+       set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+       dev->next = NULL;
+       dev_init_scheduler(dev);
+       write_lock_bh(&dev_base_lock);
+       *dev_tail = dev;
+       dev_tail = &dev->next;
+       hlist_add_head(&dev->name_hlist, head);
+       hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
+       dev_hold(dev);
+       dev->reg_state = NETREG_REGISTERING;
+       write_unlock_bh(&dev_base_lock);
+
+       /* Notify protocols, that a new device appeared. */
+       notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+
+       /* Finish registration after unlock */
+       net_set_todo(dev);
+       ret = 0;
+
+out:
+       return ret;
+out_err:
+       free_divert_blk(dev);
+       goto out;
+}
+
+/**
+ *     register_netdev - register a network device
+ *     @dev: device to register
+ *
+ *     Take a completed network device structure and add it to the kernel
+ *     interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
+ *     chain. 0 is returned on success. A negative errno code is returned
+ *     on a failure to set up the device, or if the name is a duplicate.
+ *
+ *     This is a wrapper around register_netdevice that takes the rtnl semaphore
+ *     and expands the device name if you passed a format string to
+ *     alloc_netdev.
+ */
+int register_netdev(struct net_device *dev)
+{
+       int err;
+
+       rtnl_lock();
+
+       /*
+        * If the name is a format string the caller wants us to do a
+        * name allocation.
+        */
+       if (strchr(dev->name, '%')) {
+               err = dev_alloc_name(dev, dev->name);
+               if (err < 0)
+                       goto out;
+       }
+       
+       /*
+        * Back compatibility hook. Kill this one in 2.5
+        */
+       if (dev->name[0] == 0 || dev->name[0] == ' ') {
+               err = dev_alloc_name(dev, "eth%d");
+               if (err < 0)
+                       goto out;
+       }
+
+       err = register_netdevice(dev);
+out:
+       rtnl_unlock();
+       return err;
+}
+EXPORT_SYMBOL(register_netdev);
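+
+/*
+ *     Typical driver lifecycle (sketch; error handling trimmed,
+ *     "struct my_priv" and "my_setup" are hypothetical): the "eth%d"
+ *     format string is expanded by register_netdev() via
+ *     dev_alloc_name():
+ *
+ *             dev = alloc_netdev(sizeof(struct my_priv), "eth%d", my_setup);
+ *             if (!dev)
+ *                     return -ENOMEM;
+ *             err = register_netdev(dev);
+ *             ...
+ *             unregister_netdev(dev);
+ *             free_netdev(dev);
+ */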
+
+/*
+ * netdev_wait_allrefs - wait until all references are gone.
+ *
+ * This is called when unregistering network devices.
+ *
+ * Any protocol or device that holds a reference should register
+ * for netdevice notification, and cleanup and put back the
+ * reference if they receive an UNREGISTER event.
+ * We can get stuck here if buggy protocols don't correctly
+ * call dev_put. 
+ */
+static void netdev_wait_allrefs(struct net_device *dev)
+{
+       unsigned long rebroadcast_time, warning_time;
+
+       rebroadcast_time = warning_time = jiffies;
+       while (atomic_read(&dev->refcnt) != 0) {
+               if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
+                       rtnl_shlock();
+
+                       /* Rebroadcast unregister notification */
+                       notifier_call_chain(&netdev_chain,
+                                           NETDEV_UNREGISTER, dev);
+
+                       if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
+                                    &dev->state)) {
+                               /* We must not have linkwatch events
+                                * pending on unregister. If this
+                                * happens, we simply run the queue
+                                * unscheduled, resulting in a noop
+                                * for this device.
+                                */
+                               linkwatch_run_queue();
+                       }
+
+                       rtnl_shunlock();
+
+                       rebroadcast_time = jiffies;
+               }
+
+               msleep(250);
+
+               if (time_after(jiffies, warning_time + 10 * HZ)) {
+                       printk(KERN_EMERG "unregister_netdevice: "
+                              "waiting for %s to become free. Usage "
+                              "count = %d\n",
+                              dev->name, atomic_read(&dev->refcnt));
+                       warning_time = jiffies;
+               }
+       }
+}
+
+/* The sequence is:
+ *
+ *     rtnl_lock();
+ *     ...
+ *     register_netdevice(x1);
+ *     register_netdevice(x2);
+ *     ...
+ *     unregister_netdevice(y1);
+ *     unregister_netdevice(y2);
+ *      ...
+ *     rtnl_unlock();
+ *     free_netdev(y1);
+ *     free_netdev(y2);
+ *
+ * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * This allows us to deal with problems:
+ * 1) We can create/delete sysfs objects which invoke hotplug
+ *    without deadlocking with linkwatch via keventd.
+ * 2) Since we run with the RTNL semaphore not held, we can sleep
+ *    safely in order to wait for the netdev refcnt to drop to zero.
+ */
+static DECLARE_MUTEX(net_todo_run_mutex);
+void netdev_run_todo(void)
+{
+       struct list_head list = LIST_HEAD_INIT(list);
+       int err;
+
+       /* Need to guard against multiple cpu's getting out of order. */
+       down(&net_todo_run_mutex);
+
+       /* Not safe to do outside the semaphore.  We must not return
+        * until all unregister events invoked by the local processor
+        * have been completed (either by this todo run, or one on
+        * another cpu).
+        */
+       if (list_empty(&net_todo_list))
+               goto out;
+
+       /* Snapshot list, allow later requests */
+       spin_lock(&net_todo_list_lock);
+       list_splice_init(&net_todo_list, &list);
+       spin_unlock(&net_todo_list_lock);
+               
+       while (!list_empty(&list)) {
+               struct net_device *dev
+                       = list_entry(list.next, struct net_device, todo_list);
+               list_del(&dev->todo_list);
+
+               switch(dev->reg_state) {
+               case NETREG_REGISTERING:
+                       err = netdev_register_sysfs(dev);
+                       if (err)
+                               printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
+                                      dev->name, err);
+                       dev->reg_state = NETREG_REGISTERED;
+                       break;
+
+               case NETREG_UNREGISTERING:
+                       netdev_unregister_sysfs(dev);
+                       dev->reg_state = NETREG_UNREGISTERED;
+
+                       netdev_wait_allrefs(dev);
+
+                       /* paranoia */
+                       BUG_ON(atomic_read(&dev->refcnt));
+                       BUG_TRAP(!dev->ip_ptr);
+                       BUG_TRAP(!dev->ip6_ptr);
+                       BUG_TRAP(!dev->dn_ptr);
+
+                       /* This must be the very last action; after it,
+                        * 'dev' may point to freed memory.
+                        */
+                       if (dev->destructor)
+                               dev->destructor(dev);
+                       break;
+
+               default:
+                       printk(KERN_ERR "network todo '%s' but state %d\n",
+                              dev->name, dev->reg_state);
+                       break;
+               }
+       }
+
+out:
+       up(&net_todo_run_mutex);
+}
+
+/**
+ *     alloc_netdev - allocate network device
+ *     @sizeof_priv:   size of private data to allocate space for
+ *     @name:          device name format string
+ *     @setup:         callback to initialize device
+ *
+ *     Allocates a struct net_device with private data area for driver use
+ *     and performs basic initialization.
+ */
+struct net_device *alloc_netdev(int sizeof_priv, const char *name,
+               void (*setup)(struct net_device *))
+{
+       void *p;
+       struct net_device *dev;
+       int alloc_size;
+
+       /* ensure 32-byte alignment of both the device and private area */
+       alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+       alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
+
+       p = kmalloc(alloc_size, GFP_KERNEL);
+       if (!p) {
+               printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
+               return NULL;
+       }
+       memset(p, 0, alloc_size);
+
+       dev = (struct net_device *)
+               (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+       dev->padded = (char *)dev - (char *)p;
+
+       if (sizeof_priv)
+               dev->priv = netdev_priv(dev);
+
+       setup(dev);
+       strcpy(dev->name, name);
+       return dev;
+}
+EXPORT_SYMBOL(alloc_netdev);
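+
+/*
+ *     Sketch of the intended use (names are hypothetical): the @setup
+ *     callback initialises link-level defaults, and the private area
+ *     is reached through netdev_priv():
+ *
+ *             struct my_priv {
+ *                     spinlock_t lock;
+ *             };
+ *
+ *             static void my_setup(struct net_device *dev)
+ *             {
+ *                     ether_setup(dev);
+ *             }
+ *
+ *             dev = alloc_netdev(sizeof(struct my_priv), "dummy%d", my_setup);
+ *             priv = netdev_priv(dev);
+ */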
+
+/**
+ *     free_netdev - free network device
+ *     @dev: device
+ *
+ *     This function does the last stage of destroying an allocated device 
+ *     interface. The reference to the device object is released.  
+ *     If this is the last reference then it will be freed.
+ */
+void free_netdev(struct net_device *dev)
+{
+#ifdef CONFIG_SYSFS
+       /* Compatibility with error handling in drivers */
+       if (dev->reg_state == NETREG_UNINITIALIZED) {
+               kfree((char *)dev - dev->padded);
+               return;
+       }
+
+       BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
+       dev->reg_state = NETREG_RELEASED;
+
+       /* will free via class release */
+       class_device_put(&dev->class_dev);
+#else
+       kfree((char *)dev - dev->padded);
+#endif
+}
+/* Synchronize with packet receive processing. */
+void synchronize_net(void) 
+{
+       might_sleep();
+       synchronize_kernel();
+}
+
+/**
+ *     unregister_netdevice - remove device from the kernel
+ *     @dev: device
+ *
+ *     This function shuts down a device interface and removes it
+ *     from the kernel tables. On success 0 is returned, on a failure
+ *     a negative errno code is returned.
+ *
+ *     Callers must hold the rtnl semaphore.  You may want
+ *     unregister_netdev() instead of this.
+ */
+
+int unregister_netdevice(struct net_device *dev)
+{
+       struct net_device *d, **dp;
+
+       BUG_ON(dev_boot_phase);
+       ASSERT_RTNL();
+
+       /* Some devices call this without ever registering, to unwind a failed init. */
+       if (dev->reg_state == NETREG_UNINITIALIZED) {
+               printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
+                                 "was registered\n", dev->name, dev);
+               return -ENODEV;
+       }
+
+       BUG_ON(dev->reg_state != NETREG_REGISTERED);
+
+       /* If device is running, close it first. */
+       if (dev->flags & IFF_UP)
+               dev_close(dev);
+
+       /* And unlink it from device chain. */
+       for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
+               if (d == dev) {
+                       write_lock_bh(&dev_base_lock);
+                       hlist_del(&dev->name_hlist);
+                       hlist_del(&dev->index_hlist);
+                       if (dev_tail == &dev->next)
+                               dev_tail = dp;
+                       *dp = d->next;
+                       write_unlock_bh(&dev_base_lock);
+                       break;
+               }
+       }
+       if (!d) {
+               printk(KERN_ERR "unregister net_device: '%s' not found\n",
+                      dev->name);
+               return -ENODEV;
+       }
+
+       dev->reg_state = NETREG_UNREGISTERING;
+
+       synchronize_net();
+
+       /* Shutdown queueing discipline. */
+       dev_shutdown(dev);
+
+       /* Notify protocols that we are about to destroy
+          this device; they should clean up all their state.
+       */
+       notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
+       
+       /*
+        *      Flush the multicast chain
+        */
+       dev_mc_discard(dev);
+
+       if (dev->uninit)
+               dev->uninit(dev);
+
+       /* Notifier chain MUST detach us from master device. */
+       BUG_TRAP(!dev->master);
+
+       free_divert_blk(dev);
+
+       /* Finish processing unregister after unlock */
+       net_set_todo(dev);
+
+       synchronize_net();
+
+       dev_put(dev);
+       return 0;
+}
+
+/**
+ *     unregister_netdev - remove device from the kernel
+ *     @dev: device
+ *
+ *     This function shuts down a device interface and removes it
+ *     from the kernel tables. On success 0 is returned, on a failure
+ *     a negative errno code is returned.
+ *
+ *     This is just a wrapper for unregister_netdevice that takes
+ *     the rtnl semaphore.  In general you want to use this and not
+ *     unregister_netdevice.
+ */
+void unregister_netdev(struct net_device *dev)
+{
+       rtnl_lock();
+       unregister_netdevice(dev);
+       rtnl_unlock();
+}
+
+EXPORT_SYMBOL(unregister_netdev);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int dev_cpu_callback(struct notifier_block *nfb,
+                           unsigned long action,
+                           void *ocpu)
+{
+       struct sk_buff **list_skb;
+       struct net_device **list_net;
+       struct sk_buff *skb;
+       unsigned int cpu, oldcpu = (unsigned long)ocpu;
+       struct softnet_data *sd, *oldsd;
+
+       if (action != CPU_DEAD)
+               return NOTIFY_OK;
+
+       local_irq_disable();
+       cpu = smp_processor_id();
+       sd = &per_cpu(softnet_data, cpu);
+       oldsd = &per_cpu(softnet_data, oldcpu);
+
+       /* Find end of our completion_queue. */
+       list_skb = &sd->completion_queue;
+       while (*list_skb)
+               list_skb = &(*list_skb)->next;
+       /* Append completion queue from offline CPU. */
+       *list_skb = oldsd->completion_queue;
+       oldsd->completion_queue = NULL;
+
+       /* Find end of our output_queue. */
+       list_net = &sd->output_queue;
+       while (*list_net)
+               list_net = &(*list_net)->next_sched;
+       /* Append output queue from offline CPU. */
+       *list_net = oldsd->output_queue;
+       oldsd->output_queue = NULL;
+
+       raise_softirq_irqoff(NET_TX_SOFTIRQ);
+       local_irq_enable();
+
+       /* Process offline CPU's input_pkt_queue */
+       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
+               netif_rx(skb);
+
+       return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+
+/*
+ *     Initialize the DEV module. At boot time this walks the device list and
+ *     unhooks any devices that fail to initialise (normally hardware not
+ *     present) and leaves us with a valid list of present and active devices.
+ *
+ */
+
+/*
+ *       This is called single-threaded during boot, so there is no need
+ *       to take the rtnl semaphore.
+ */
+static int __init net_dev_init(void)
+{
+       int i, rc = -ENOMEM;
+
+       BUG_ON(!dev_boot_phase);
+
+       net_random_init();
+
+       if (dev_proc_init())
+               goto out;
+
+       if (netdev_sysfs_init())
+               goto out;
+
+       INIT_LIST_HEAD(&ptype_all);
+       for (i = 0; i < 16; i++) 
+               INIT_LIST_HEAD(&ptype_base[i]);
+
+       for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
+               INIT_HLIST_HEAD(&dev_name_head[i]);
+
+       for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
+               INIT_HLIST_HEAD(&dev_index_head[i]);
+
+       /*
+        *      Initialise the packet receive queues.
+        */
+
+       for (i = 0; i < NR_CPUS; i++) {
+               struct softnet_data *queue;
+
+               queue = &per_cpu(softnet_data, i);
+               skb_queue_head_init(&queue->input_pkt_queue);
+               queue->throttle = 0;
+               queue->cng_level = 0;
+               queue->avg_blog = 10; /* arbitrary non-zero */
+               queue->completion_queue = NULL;
+               INIT_LIST_HEAD(&queue->poll_list);
+               set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
+               queue->backlog_dev.weight = weight_p;
+               queue->backlog_dev.poll = process_backlog;
+               atomic_set(&queue->backlog_dev.refcnt, 1);
+       }
+
+#ifdef OFFLINE_SAMPLE
+       samp_timer.expires = jiffies + (10 * HZ);
+       add_timer(&samp_timer);
+#endif
+
+       dev_boot_phase = 0;
+
+       open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
+       open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
+
+       hotcpu_notifier(dev_cpu_callback, 0);
+       dst_init();
+       dev_mcast_init();
+       rc = 0;
+out:
+       return rc;
+}
+
+subsys_initcall(net_dev_init);
+
+EXPORT_SYMBOL(__dev_get_by_index);
+EXPORT_SYMBOL(__dev_get_by_name);
+EXPORT_SYMBOL(__dev_remove_pack);
+EXPORT_SYMBOL(__skb_linearize);
+EXPORT_SYMBOL(dev_add_pack);
+EXPORT_SYMBOL(dev_alloc_name);
+EXPORT_SYMBOL(dev_close);
+EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_index);
+EXPORT_SYMBOL(dev_get_by_name);
+EXPORT_SYMBOL(dev_ioctl);
+EXPORT_SYMBOL(dev_open);
+EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_remove_pack);
+EXPORT_SYMBOL(dev_set_allmulti);
+EXPORT_SYMBOL(dev_set_promiscuity);
+EXPORT_SYMBOL(dev_change_flags);
+EXPORT_SYMBOL(dev_set_mtu);
+EXPORT_SYMBOL(free_netdev);
+EXPORT_SYMBOL(netdev_boot_setup_check);
+EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_state_change);
+EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_rx);
+EXPORT_SYMBOL(register_gifconf);
+EXPORT_SYMBOL(register_netdevice);
+EXPORT_SYMBOL(register_netdevice_notifier);
+EXPORT_SYMBOL(skb_checksum_help);
+EXPORT_SYMBOL(synchronize_net);
+EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(unregister_netdevice_notifier);
+EXPORT_SYMBOL(net_enable_timestamp);
+EXPORT_SYMBOL(net_disable_timestamp);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+EXPORT_SYMBOL(br_handle_frame_hook);
+#endif
+
+#ifdef CONFIG_KMOD
+EXPORT_SYMBOL(dev_load);
+#endif
+
+EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/linux-2.6.11-xen-sparse/net/core/skbuff.c b/linux-2.6.11-xen-sparse/net/core/skbuff.c
new file mode 100644 (file)
index 0000000..be2801e
--- /dev/null
@@ -0,0 +1,1523 @@
+/*
+ *     Routines having to do with the 'struct sk_buff' memory handlers.
+ *
+ *     Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
+ *                     Florian La Roche <rzsfl@rz.uni-sb.de>
+ *
+ *     Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
+ *
+ *     Fixes:
+ *             Alan Cox        :       Fixed the worst of the load
+ *                                     balancer bugs.
+ *             Dave Platt      :       Interrupt stacking fix.
+ *     Richard Kooijman        :       Timestamp fixes.
+ *             Alan Cox        :       Changed buffer format.
+ *             Alan Cox        :       destructor hook for AF_UNIX etc.
+ *             Linus Torvalds  :       Better skb_clone.
+ *             Alan Cox        :       Added skb_copy.
+ *             Alan Cox        :       Added all the changed routines Linus
+ *                                     only put in the headers
+ *             Ray VanTassle   :       Fixed --skb->lock in free
+ *             Alan Cox        :       skb_copy copy arp field
+ *             Andi Kleen      :       slabified it.
+ *             Robert Olsson   :       Removed skb_head_pool
+ *
+ *     NOTE:
+ *             The __skb_ routines should be called with interrupts
+ *     disabled, or you better be *real* sure that the operation is atomic
+ *     with respect to whatever list is being frobbed (e.g. via lock_sock()
+ *     or via disabling bottom half handlers, etc).
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+/*
+ *     The functions in this file will not compile correctly with gcc 2.4.x
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#ifdef CONFIG_NET_CLS_ACT
+#include <net/pkt_sched.h>
+#endif
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <net/protocol.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/xfrm.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+static kmem_cache_t *skbuff_head_cache;
+
+/*
+ *     Keep out-of-line to prevent kernel bloat.
+ *     __builtin_return_address is not used because it is not always
+ *     reliable.
+ */
+
+/**
+ *     skb_over_panic  -       private function
+ *     @skb: buffer
+ *     @sz: size
+ *     @here: address
+ *
+ *     Out of line support code for skb_put(). Not user callable.
+ */
+void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+{
+       printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s",
+               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+       BUG();
+}
+
+/**
+ *     skb_under_panic -       private function
+ *     @skb: buffer
+ *     @sz: size
+ *     @here: address
+ *
+ *     Out of line support code for skb_push(). Not user callable.
+ */
+
+void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+{
+       printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s",
+               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
+       BUG();
+}
+
+/*     Allocate a new skbuff. We do this ourselves so we can fill in a few
+ *     'private' fields and also do memory statistics to find all the
+ *     [BEEP] leaks.
+ *
+ */
+
+/**
+ *     alloc_skb       -       allocate a network buffer
+ *     @size: size to allocate
+ *     @gfp_mask: allocation mask
+ *
+ *     Allocate a new &sk_buff. The returned buffer has no headroom and a
+ *     tail room of @size bytes. The object has a reference count of one.
+ *     The return is the buffer. On a failure the return is %NULL.
+ *
+ *     Buffers may only be allocated from interrupts using a @gfp_mask of
+ *     %GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+{
+       struct sk_buff *skb;
+       u8 *data;
+
+       /* Get the HEAD */
+       skb = kmem_cache_alloc(skbuff_head_cache,
+                              gfp_mask & ~__GFP_DMA);
+       if (!skb)
+               goto out;
+
+       /* Get the DATA. Size must match skb_add_mtu(). */
+       size = SKB_DATA_ALIGN(size);
+       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+       if (!data)
+               goto nodata;
+
+       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       skb->truesize = size + sizeof(struct sk_buff);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb->tail = data;
+       skb->end  = data + size;
+
+       atomic_set(&(skb_shinfo(skb)->dataref), 1);
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->tso_segs = 0;
+       skb_shinfo(skb)->frag_list = NULL;
+out:
+       return skb;
+nodata:
+       kmem_cache_free(skbuff_head_cache, skb);
+       skb = NULL;
+       goto out;
+}
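+
+/*
+ *     Usage sketch (illustrative; 16 bytes of headroom is a common
+ *     choice for an Ethernet driver, not a requirement):
+ *
+ *             skb = alloc_skb(len + 16, GFP_ATOMIC);
+ *             if (!skb)
+ *                     return NULL;
+ *             skb_reserve(skb, 16);
+ *             memcpy(skb_put(skb, len), data, len);
+ */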
+
+/**
+ *     alloc_skb_from_cache    -       allocate a network buffer
+ *     @cp: kmem_cache from which to allocate the data area
+ *           (object size must be big enough for @size bytes + skb overheads)
+ *     @size: size to allocate
+ *     @gfp_mask: allocation mask
+ *
+ *     Allocate a new &sk_buff. The returned buffer has no headroom and
+ *     a tail room of @size bytes. The object has a reference count of one.
+ *     The return is the buffer. On a failure the return is %NULL.
+ *
+ *     Buffers may only be allocated from interrupts using a @gfp_mask of
+ *     %GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+                                    unsigned int size, int gfp_mask)
+{
+       struct sk_buff *skb;
+       u8 *data;
+
+       /* Get the HEAD */
+       skb = kmem_cache_alloc(skbuff_head_cache,
+                              gfp_mask & ~__GFP_DMA);
+       if (!skb)
+               goto out;
+
+       /* Get the DATA. */
+       size = SKB_DATA_ALIGN(size);
+       data = kmem_cache_alloc(cp, gfp_mask);
+       if (!data)
+               goto nodata;
+
+       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       skb->truesize = size + sizeof(struct sk_buff);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb->tail = data;
+       skb->end  = data + size;
+
+       atomic_set(&(skb_shinfo(skb)->dataref), 1);
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb_shinfo(skb)->tso_size = 0;
+       skb_shinfo(skb)->tso_segs = 0;
+       skb_shinfo(skb)->frag_list = NULL;
+out:
+       return skb;
+nodata:
+       kmem_cache_free(skbuff_head_cache, skb);
+       skb = NULL;
+       goto out;
+}
+
+
+static void skb_drop_fraglist(struct sk_buff *skb)
+{
+       struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+       skb_shinfo(skb)->frag_list = NULL;
+
+       do {
+               struct sk_buff *this = list;
+               list = list->next;
+               kfree_skb(this);
+       } while (list);
+}
+
+static void skb_clone_fraglist(struct sk_buff *skb)
+{
+       struct sk_buff *list;
+
+       for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+               skb_get(list);
+}
+
+void skb_release_data(struct sk_buff *skb)
+{
+       if (!skb->cloned ||
+           atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
+               if (skb_shinfo(skb)->nr_frags) {
+                       int i;
+                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+                               put_page(skb_shinfo(skb)->frags[i].page);
+               }
+
+               if (skb_shinfo(skb)->frag_list)
+                       skb_drop_fraglist(skb);
+
+               kfree(skb->head);
+       }
+}
+
+/*
+ *     Free an skbuff's memory without cleaning its state.
+ */
+void kfree_skbmem(struct sk_buff *skb)
+{
+       skb_release_data(skb);
+       kmem_cache_free(skbuff_head_cache, skb);
+}
+
+/**
+ *     __kfree_skb - private function
+ *     @skb: buffer
+ *
+ *     Free an sk_buff. Release anything attached to the buffer.
+ *     Clean the state. This is an internal helper function. Users should
+ *     always call kfree_skb.
+ */
+
+void __kfree_skb(struct sk_buff *skb)
+{
+       if (skb->list) {
+               printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
+                      "on a list (from %p).\n", NET_CALLER(skb));
+               BUG();
+       }
+
+       dst_release(skb->dst);
+#ifdef CONFIG_XFRM
+       secpath_put(skb->sp);
+#endif
+       if (skb->destructor) {
+               if (in_irq())
+                       printk(KERN_WARNING "Warning: kfree_skb on "
+                                           "hard IRQ %p\n", NET_CALLER(skb));
+               skb->destructor(skb);
+       }
+#ifdef CONFIG_NETFILTER
+       nf_conntrack_put(skb->nfct);
+#ifdef CONFIG_BRIDGE_NETFILTER
+       nf_bridge_put(skb->nf_bridge);
+#endif
+#endif
+/* XXX: IS this still necessary? - JHS */
+#ifdef CONFIG_NET_SCHED
+       skb->tc_index = 0;
+#ifdef CONFIG_NET_CLS_ACT
+       skb->tc_verd = 0;
+       skb->tc_classid = 0;
+#endif
+#endif
+
+       kfree_skbmem(skb);
+}
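+
+/*
+ *     Note (sketch): callers normally use kfree_skb(), an inline in
+ *     <linux/skbuff.h>, which drops a reference and only calls
+ *     __kfree_skb() once skb->users reaches zero:
+ *
+ *             kfree_skb(skb);
+ */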
+
+/**
+ *     skb_clone       -       duplicate an sk_buff
+ *     @skb: buffer to clone
+ *     @gfp_mask: allocation priority
+ *
+ *     Duplicate an &sk_buff. The new one is not owned by a socket. Both
+ *     copies share the same packet data but not structure. The new
+ *     buffer has a reference count of 1. If the allocation fails the
+ *     function returns %NULL otherwise the new buffer is returned.
+ *
+ *     If this function is called from an interrupt, @gfp_mask must be
+ *     %GFP_ATOMIC.
+ */
+
+struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+{
+       struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+
+       if (!n) 
+               return NULL;
+
+#define C(x) n->x = skb->x
+
+       n->next = n->prev = NULL;
+       n->list = NULL;
+       n->sk = NULL;
+       C(stamp);
+       C(dev);
+       C(real_dev);
+       C(h);
+       C(nh);
+       C(mac);
+       C(dst);
+       dst_clone(skb->dst);
+       C(sp);
+#ifdef CONFIG_INET
+       secpath_get(skb->sp);
+#endif
+       memcpy(n->cb, skb->cb, sizeof(skb->cb));
+       C(len);
+       C(data_len);
+       C(csum);
+       C(local_df);
+       n->cloned = 1;
+       C(proto_csum_valid);
+       C(proto_csum_blank);
+       C(pkt_type);
+       C(ip_summed);
+       C(priority);
+       C(protocol);
+       C(security);
+       n->destructor = NULL;
+#ifdef CONFIG_NETFILTER
+       C(nfmark);
+       C(nfcache);
+       C(nfct);
+       nf_conntrack_get(skb->nfct);
+       C(nfctinfo);
+#ifdef CONFIG_NETFILTER_DEBUG
+       C(nf_debug);
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       C(nf_bridge);
+       nf_bridge_get(skb->nf_bridge);
+#endif
+#endif /*CONFIG_NETFILTER*/
+#if defined(CONFIG_HIPPI)
+       C(private);
+#endif
+#ifdef CONFIG_NET_SCHED
+       C(tc_index);
+#ifdef CONFIG_NET_CLS_ACT
+       n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
+       n->tc_verd = CLR_TC_OK2MUNGE(skb->tc_verd);
+       n->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
+       C(input_dev);
+       C(tc_classid);
+#endif
+
+#endif
+       C(truesize);
+       atomic_set(&n->users, 1);
+       C(head);
+       C(data);
+       C(tail);
+       C(end);
+
+       atomic_inc(&(skb_shinfo(skb)->dataref));
+       skb->cloned = 1;
+
+       return n;
+}
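+
+/*
+ *     Note (sketch; "other_dev" is a hypothetical device): a clone
+ *     shares the packet data, so it is suitable when only the struct
+ *     sk_buff metadata will change; writable data needs skb_copy() or
+ *     pskb_copy() instead:
+ *
+ *             nskb = skb_clone(skb, GFP_ATOMIC);
+ *             if (nskb)
+ *                     nskb->dev = other_dev;
+ */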
+
+static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+       /*
+        *      Shift between the two data areas in bytes
+        */
+       unsigned long offset = new->data - old->data;
+
+       new->list       = NULL;
+       new->sk         = NULL;
+       new->dev        = old->dev;
+       new->real_dev   = old->real_dev;
+       new->priority   = old->priority;
+       new->protocol   = old->protocol;
+       new->dst        = dst_clone(old->dst);
+#ifdef CONFIG_INET
+       new->sp         = secpath_get(old->sp);
+#endif
+       new->h.raw      = old->h.raw + offset;
+       new->nh.raw     = old->nh.raw + offset;
+       new->mac.raw    = old->mac.raw + offset;
+       memcpy(new->cb, old->cb, sizeof(old->cb));
+       new->local_df   = old->local_df;
+       new->pkt_type   = old->pkt_type;
+       new->stamp      = old->stamp;
+       new->destructor = NULL;
+       new->security   = old->security;
+#ifdef CONFIG_NETFILTER
+       new->nfmark     = old->nfmark;
+       new->nfcache    = old->nfcache;
+       new->nfct       = old->nfct;
+       nf_conntrack_get(old->nfct);
+       new->nfctinfo   = old->nfctinfo;
+#ifdef CONFIG_NETFILTER_DEBUG
+       new->nf_debug   = old->nf_debug;
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       new->nf_bridge  = old->nf_bridge;
+       nf_bridge_get(old->nf_bridge);
+#endif
+#endif
+#ifdef CONFIG_NET_SCHED
+#ifdef CONFIG_NET_CLS_ACT
+       new->tc_verd = old->tc_verd;
+#endif
+       new->tc_index   = old->tc_index;
+#endif
+       atomic_set(&new->users, 1);
+       skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
+       skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
+}
+
+/**
+ *     skb_copy        -       create private copy of an sk_buff
+ *     @skb: buffer to copy
+ *     @gfp_mask: allocation priority
+ *
+ *     Make a copy of both an &sk_buff and its data. This is used when the
+ *     caller wishes to modify the data and needs a private copy of the
+ *     data to alter. Returns %NULL on failure or the pointer to the buffer
+ *     on success. The returned buffer has a reference count of 1.
+ *
+ *     As a by-product this function converts a non-linear &sk_buff into a
+ *     linear one, so the &sk_buff becomes completely private and the caller
+ *     may modify all of the data in the returned buffer. It is therefore
+ *     not recommended when only the header is going to be modified; use
+ *     pskb_copy() instead.
+ */
+
+struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+{
+       int headerlen = skb->data - skb->head;
+       /*
+        *      Allocate the copy buffer
+        */
+       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
+                                     gfp_mask);
+       if (!n)
+               return NULL;
+
+       /* Set the data pointer */
+       skb_reserve(n, headerlen);
+       /* Set the tail pointer and length */
+       skb_put(n, skb->len);
+       n->csum      = skb->csum;
+       n->ip_summed = skb->ip_summed;
+
+       if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
+               BUG();
+
+       copy_skb_header(n, skb);
+       return n;
+}
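+
+/*
+ * Usage sketch (hypothetical, for illustration): take a private, linear
+ * copy before scribbling on payload bytes of a buffer that may be cloned
+ * or non-linear. The 4-byte region is an arbitrary assumption.
+ */
+static inline struct sk_buff *skb_copy_example(const struct sk_buff *skb)
+{
+       struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
+
+       if (priv && priv->len >= 4)
+               memset(priv->data, 0, 4);       /* safe: data is private */
+       return priv;
+}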
+
+
+/**
+ *     pskb_copy       -       create copy of an sk_buff with private head.
+ *     @skb: buffer to copy
+ *     @gfp_mask: allocation priority
+ *
+ *     Make a copy of both an &sk_buff and the part of its data located
+ *     in the header. Fragmented data remain shared. This is used when
+ *     the caller wishes to modify only the header of an &sk_buff and
+ *     needs a private copy of the header to alter. Returns %NULL on
+ *     failure or the pointer to the buffer on success.
+ *     The returned buffer has a reference count of 1.
+ */
+
+struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+{
+       /*
+        *      Allocate the copy buffer
+        */
+       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
+
+       if (!n)
+               goto out;
+
+       /* Set the data pointer */
+       skb_reserve(n, skb->data - skb->head);
+       /* Set the tail pointer and length */
+       skb_put(n, skb_headlen(skb));
+       /* Copy the bytes */
+       memcpy(n->data, skb->data, n->len);
+       n->csum      = skb->csum;
+       n->ip_summed = skb->ip_summed;
+
+       n->data_len  = skb->data_len;
+       n->len       = skb->len;
+
+       if (skb_shinfo(skb)->nr_frags) {
+               int i;
+
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+                       get_page(skb_shinfo(n)->frags[i].page);
+               }
+               skb_shinfo(n)->nr_frags = i;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
+               skb_clone_fraglist(n);
+       }
+
+       copy_skb_header(n, skb);
+out:
+       return n;
+}
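+
+/*
+ * Usage sketch (hypothetical, for illustration): pskb_copy() is the
+ * cheaper choice when only header bytes will be rewritten, since the
+ * paged fragments stay shared. The first six bytes stand in for a
+ * hypothetical destination MAC rewrite.
+ */
+static inline struct sk_buff *pskb_copy_example(struct sk_buff *skb)
+{
+       struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);
+
+       if (n && skb_headlen(n) >= 6)
+               memset(n->data, 0xff, 6);       /* header area is private */
+       return n;
+}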
+
+/**
+ *     pskb_expand_head - reallocate header of &sk_buff
+ *     @skb: buffer to reallocate
+ *     @nhead: room to add at head
+ *     @ntail: room to add at tail
+ *     @gfp_mask: allocation priority
+ *
+ *     Expands (or creates an identical copy, if @nhead and @ntail are zero)
+ *     the header of the skb. The &sk_buff itself is not changed and MUST
+ *     have a reference count of 1. Returns zero on success, or a negative
+ *     error code if expansion failed, in which case the &sk_buff is left
+ *     unchanged.
+ *
+ *     All the pointers pointing into skb header may change and must be
+ *     reloaded after call to this function.
+ */
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+{
+       int i;
+       u8 *data;
+       int size = nhead + (skb->end - skb->head) + ntail;
+       long off;
+
+       if (skb_shared(skb))
+               BUG();
+
+       size = SKB_DATA_ALIGN(size);
+
+       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+       if (!data)
+               goto nodata;
+
+       /* Copy only real data... and, alas, the header. This should be
+        * optimized for the case where the header is empty. */
+       memcpy(data + nhead, skb->head, skb->tail - skb->head);
+       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+               get_page(skb_shinfo(skb)->frags[i].page);
+
+       if (skb_shinfo(skb)->frag_list)
+               skb_clone_fraglist(skb);
+
+       skb_release_data(skb);
+
+       off = (data + nhead) - skb->head;
+
+       skb->head     = data;
+       skb->end      = data + size;
+       skb->data    += off;
+       skb->tail    += off;
+       skb->mac.raw += off;
+       skb->h.raw   += off;
+       skb->nh.raw  += off;
+       skb->cloned   = 0;
+       atomic_set(&skb_shinfo(skb)->dataref, 1);
+       return 0;
+
+nodata:
+       return -ENOMEM;
+}
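+
+/*
+ * Usage sketch (hypothetical, for illustration): grow the headroom of a
+ * privately held skb in place. Every pointer into the header is stale
+ * after a successful call and must be re-read. The 16-byte target is an
+ * arbitrary assumption.
+ */
+static inline int pskb_expand_head_example(struct sk_buff *skb)
+{
+       if (skb_headroom(skb) >= 16)
+               return 0;
+       if (pskb_expand_head(skb, 16 - skb_headroom(skb), 0, GFP_ATOMIC))
+               return -ENOMEM;
+       /* skb->data, skb->h.raw etc. have moved; reload them from here. */
+       return 0;
+}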
+
+/* Make private copy of skb with writable head and some headroom */
+
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+{
+       struct sk_buff *skb2;
+       int delta = headroom - skb_headroom(skb);
+
+       if (delta <= 0)
+               skb2 = pskb_copy(skb, GFP_ATOMIC);
+       else {
+               skb2 = skb_clone(skb, GFP_ATOMIC);
+               if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
+                                            GFP_ATOMIC)) {
+                       kfree_skb(skb2);
+                       skb2 = NULL;
+               }
+       }
+       return skb2;
+}
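+
+/*
+ * Usage sketch (hypothetical, for illustration): the usual calling
+ * pattern drops the original buffer once the reallocated one is in
+ * hand, since skb_realloc_headroom() does not consume its argument.
+ */
+static inline struct sk_buff *realloc_headroom_example(struct sk_buff *skb,
+                                                       unsigned int headroom)
+{
+       struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);
+
+       if (nskb)
+               kfree_skb(skb);         /* continue with nskb */
+       return nskb;
+}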
+
+
+/**
+ *     skb_copy_expand -       copy and expand sk_buff
+ *     @skb: buffer to copy
+ *     @newheadroom: new free bytes at head
+ *     @newtailroom: new free bytes at tail
+ *     @gfp_mask: allocation priority
+ *
+ *     Make a copy of both an &sk_buff and its data and while doing so
+ *     allocate additional space.
+ *
+ *     This is used when the caller wishes to modify the data and needs a
+ *     private copy of the data to alter as well as more space for new fields.
+ *     Returns %NULL on failure or the pointer to the buffer
+ *     on success. The returned buffer has a reference count of 1.
+ *
+ *     You must pass %GFP_ATOMIC as the allocation priority if this function
+ *     is called from an interrupt.
+ *
+ *     BUG ALERT: ip_summed is not copied. Why does this work? Is it used
+ *     only by netfilter in the cases when checksum is recalculated? --ANK
+ */
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+                               int newheadroom, int newtailroom, int gfp_mask)
+{
+       /*
+        *      Allocate the copy buffer
+        */
+       struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
+                                     gfp_mask);
+       int head_copy_len, head_copy_off;
+
+       if (!n)
+               return NULL;
+
+       skb_reserve(n, newheadroom);
+
+       /* Set the tail pointer and length */
+       skb_put(n, skb->len);
+
+       head_copy_len = skb_headroom(skb);
+       head_copy_off = 0;
+       if (newheadroom <= head_copy_len)
+               head_copy_len = newheadroom;
+       else
+               head_copy_off = newheadroom - head_copy_len;
+
+       /* Copy the linear header and data. */
+       if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
+                         skb->len + head_copy_len))
+               BUG();
+
+       copy_skb_header(n, skb);
+
+       return n;
+}
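+
+/*
+ * Usage sketch (hypothetical, for illustration): copy a buffer while
+ * reserving room for an assumed 8-byte encapsulation header in front
+ * of the existing data.
+ */
+static inline struct sk_buff *copy_expand_example(const struct sk_buff *skb)
+{
+       struct sk_buff *n = skb_copy_expand(skb, skb_headroom(skb) + 8,
+                                           skb_tailroom(skb), GFP_ATOMIC);
+
+       if (n)
+               skb_push(n, 8);         /* claim the extra headroom */
+       return n;
+}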
+
+/**
+ *     skb_pad                 -       zero pad the tail of an skb
+ *     @skb: buffer to pad
+ *     @pad: space to pad
+ *
+ *     Ensure that a buffer is followed by a padding area that is zero
+ *     filled. Used by network drivers which may DMA or transfer data
+ *     beyond the buffer end onto the wire.
+ *
+ *     May return %NULL in out-of-memory cases.
+ */
+struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
+{
+       struct sk_buff *nskb;
+
+       /* If the skbuff is non-linear, tailroom is always zero. */
+       if (skb_tailroom(skb) >= pad) {
+               memset(skb->data+skb->len, 0, pad);
+               return skb;
+       }
+
+       nskb = skb_copy_expand(skb, skb_headroom(skb),
+                              skb_tailroom(skb) + pad, GFP_ATOMIC);
+       kfree_skb(skb);
+       if (nskb)
+               memset(nskb->data+nskb->len, 0, pad);
+       return nskb;
+}
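+
+/*
+ * Usage sketch (hypothetical, for illustration): pad a runt Ethernet
+ * frame to the 60-byte minimum before transmission. Note that skb_pad()
+ * frees the original buffer if it must reallocate and fails, so the old
+ * pointer must not be used after the call.
+ */
+static inline struct sk_buff *skb_pad_example(struct sk_buff *skb)
+{
+       if (skb->len < 60)
+               skb = skb_pad(skb, 60 - skb->len);      /* NULL on OOM */
+       return skb;
+}
+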
+/* Trims skb to length len. It may change skb pointers if "realloc" is 1.
+ * If realloc == 0 and trimming is impossible without changing the data,
+ * it is a BUG().
+ */
+
+int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
+{
+       int offset = skb_headlen(skb);
+       int nfrags = skb_shinfo(skb)->nr_frags;
+       int i;
+
+       for (i = 0; i < nfrags; i++) {
+               int end = offset + skb_shinfo(skb)->frags[i].size;
+               if (end > len) {
+                       if (skb_cloned(skb)) {
+                               if (!realloc)
+                                       BUG();
+                               if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+                                       return -ENOMEM;
+                       }
+                       if (len <= offset) {
+                               put_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb)->nr_frags--;
+                       } else {
+                               skb_shinfo(skb)->frags[i].size = len - offset;
+                       }
+               }
+               offset = end;
+       }
+
+       if (offset < len) {
+               skb->data_len -= skb->len - len;
+               skb->len       = len;
+       } else {
+               if (len <= skb_headlen(skb)) {
+                       skb->len      = len;
+                       skb->data_len = 0;
+                       skb->tail     = skb->data + len;
+                       if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
+                               skb_drop_fraglist(skb);
+               } else {
+                       skb->data_len -= skb->len - len;
+                       skb->len       = len;
+               }
+       }
+
+       return 0;
+}
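+
+/*
+ * Usage sketch (hypothetical, for illustration): callers normally reach
+ * ___pskb_trim() through the pskb_trim() wrapper in skbuff.h, e.g. to
+ * drop link-layer padding beyond a protocol's stated total length.
+ */
+static inline int pskb_trim_example(struct sk_buff *skb, unsigned int totlen)
+{
+       if (skb->len > totlen)
+               return pskb_trim(skb, totlen);  /* may realloc the head */
+       return 0;
+}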
+
+/**
+ *     __pskb_pull_tail - advance tail of skb header
+ *     @skb: buffer to reallocate
+ *     @delta: number of bytes to advance tail
+ *
+ *     This function makes sense only on a fragmented &sk_buff: it expands
+ *     the header, moving its tail forward and copying the necessary data
+ *     from the fragmented part.
+ *
+ *     &sk_buff MUST have reference count of 1.
+ *
+ *     Returns %NULL (and the &sk_buff does not change) if the pull failed,
+ *     or the value of the new tail of the skb on success.
+ *
+ *     All the pointers pointing into skb header may change and must be
+ *     reloaded after call to this function.
+ */
+
+/* Moves the tail of the skb head forward, copying data from the
+ * fragmented part when necessary.
+ * 1. It may fail due to allocation failure.
+ * 2. It may change skb pointers.
+ *
+ * It is pretty complicated. Luckily, it is called only in exceptional cases.
+ */
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
+{
+       /* If the skb does not have enough free space at the tail, get a new
+        * one plus 128 bytes for future expansions. If we do have enough
+        * room at the tail, reallocate without expansion only if the skb is
+        * cloned.
+        */
+       int i, k, eat = (skb->tail + delta) - skb->end;
+
+       if (eat > 0 || skb_cloned(skb)) {
+               if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
+                                    GFP_ATOMIC))
+                       return NULL;
+       }
+
+       if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
+               BUG();
+
+       /* Optimization: no fragments, no reason to pre-estimate the
+        * size of the pulled pages. Superb.
+        */
+       if (!skb_shinfo(skb)->frag_list)
+               goto pull_pages;
+
+       /* Estimate size of pulled pages. */
+       eat = delta;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size >= eat)
+                       goto pull_pages;
+               eat -= skb_shinfo(skb)->frags[i].size;
+       }
+
+       /* If we need to update the frag list, we are in trouble.
+        * Certainly, it is possible to add an offset to the skb data,
+        * but, taking into account that pulling is expected to
+        * be a very rare operation, it is worth fighting against
+        * further bloating of the skb head and crucifying ourselves
+        * here instead. Pure masochism, indeed. 8)8)
+        */
+       if (eat) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+               struct sk_buff *clone = NULL;
+               struct sk_buff *insp = NULL;
+
+               do {
+                       if (!list)
+                               BUG();
+
+                       if (list->len <= eat) {
+                               /* Eaten as whole. */
+                               eat -= list->len;
+                               list = list->next;
+                               insp = list;
+                       } else {
+                               /* Eaten partially. */
+
+                               if (skb_shared(list)) {
+                                       /* Sucks! We need to fork the list. :-( */
+                                       clone = skb_clone(list, GFP_ATOMIC);
+                                       if (!clone)
+                                               return NULL;
+                                       insp = list->next;
+                                       list = clone;
+                               } else {
+                                       /* This may be pulled without
+                                        * problems. */
+                                       insp = list;
+                               }
+                               if (!pskb_pull(list, eat)) {
+                                       if (clone)
+                                               kfree_skb(clone);
+                                       return NULL;
+                               }
+                               break;
+                       }
+               } while (eat);
+
+               /* Free pulled out fragments. */
+               while ((list = skb_shinfo(skb)->frag_list) != insp) {
+                       skb_shinfo(skb)->frag_list = list->next;
+                       kfree_skb(list);
+               }
+               /* And insert new clone at head. */
+               if (clone) {
+                       clone->next = list;
+                       skb_shinfo(skb)->frag_list = clone;
+               }
+       }
+       /* Success! Now we may commit changes to skb data. */
+
+pull_pages:
+       eat = delta;
+       k = 0;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               if (skb_shinfo(skb)->frags[i].size <= eat) {
+                       put_page(skb_shinfo(skb)->frags[i].page);
+                       eat -= skb_shinfo(skb)->frags[i].size;
+               } else {
+                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+                       if (eat) {
+                               skb_shinfo(skb)->frags[k].page_offset += eat;
+                               skb_shinfo(skb)->frags[k].size -= eat;
+                               eat = 0;
+                       }
+                       k++;
+               }
+       }
+       skb_shinfo(skb)->nr_frags = k;
+
+       skb->tail     += delta;
+       skb->data_len -= delta;
+
+       return skb->tail;
+}
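+
+/*
+ * Usage sketch (hypothetical, for illustration): protocol code normally
+ * reaches __pskb_pull_tail() through pskb_may_pull(), to linearize just
+ * enough bytes before reading a header.
+ */
+static inline int pull_tail_example(struct sk_buff *skb, unsigned int hdrlen)
+{
+       if (!pskb_may_pull(skb, hdrlen))
+               return -EINVAL;         /* too short, or allocation failed */
+       /* The first hdrlen bytes at skb->data are now linear. */
+       return 0;
+}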
+
+/* Copy some data bits from skb to kernel buffer. */
+
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
+{
+       int i, copy;
+       int start = skb_headlen(skb);
+
+       if (offset > (int)skb->len - len)
+               goto fault;
+
+       /* Copy header. */
+       if ((copy = start - offset) > 0) {
+               if (copy > len)
+                       copy = len;
+               memcpy(to, skb->data + offset, copy);
+               if ((len -= copy) == 0)
+                       return 0;
+               offset += copy;
+               to     += copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       u8 *vaddr;
+
+                       if (copy > len)
+                               copy = len;
+
+                       vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+                       memcpy(to,
+                              vaddr + skb_shinfo(skb)->frags[i].page_offset+
+                              offset - start, copy);
+                       kunmap_skb_frag(vaddr);
+
+                       if ((len -= copy) == 0)
+                               return 0;
+                       offset += copy;
+                       to     += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               if (copy > len)
+                                       copy = len;
+                               if (skb_copy_bits(list, offset - start,
+                                                 to, copy))
+                                       goto fault;
+                               if ((len -= copy) == 0)
+                                       return 0;
+                               offset += copy;
+                               to     += copy;
+                       }
+                       start = end;
+               }
+       }
+       if (!len)
+               return 0;
+
+fault:
+       return -EFAULT;
+}
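+
+/*
+ * Usage sketch (hypothetical, for illustration): fetch bytes that may
+ * live in paged fragments into a private buffer. The 20-byte header
+ * size is an arbitrary assumption.
+ */
+static inline int copy_bits_example(const struct sk_buff *skb)
+{
+       u8 hdr[20];
+
+       if (skb_copy_bits(skb, 0, hdr, sizeof(hdr)))
+               return -EFAULT;         /* packet shorter than 20 bytes */
+       return hdr[0];                  /* e.g. inspect the first byte */
+}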
+
+/* Keep iterating until skb_iter_next returns false. */
+void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
+{
+       i->len = skb_headlen(skb);
+       i->data = (unsigned char *)skb->data;
+       i->nextfrag = 0;
+       i->fraglist = NULL;
+}
+
+int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
+{
+       /* Unmap previous, if not head fragment. */
+       if (i->nextfrag)
+               kunmap_skb_frag(i->data);
+
+       if (i->fraglist) {
+       fraglist:
+               /* We're iterating through fraglist. */
+               if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
+                       i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
+                                               ->frags[i->nextfrag]);
+                       i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
+                               .size;
+                       i->nextfrag++;
+                       return 1;
+               }
+               /* Fragments with fragments?  Too hard! */
+               BUG_ON(skb_shinfo(i->fraglist)->frag_list);
+               i->fraglist = i->fraglist->next;
+               if (!i->fraglist)
+                       goto end;
+
+               i->len = skb_headlen(i->fraglist);
+               i->data = i->fraglist->data;
+               i->nextfrag = 0;
+               return 1;
+       }
+
+       if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
+               i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
+               i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
+               i->nextfrag++;
+               return 1;
+       }
+
+       i->fraglist = skb_shinfo(skb)->frag_list;
+       if (i->fraglist)
+               goto fraglist;
+
+end:
+       /* Bug trap for callers */
+       i->data = NULL;
+       return 0;
+}
+
+void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
+{
+       /* Unmap previous, if not head fragment. */
+       if (i->data && i->nextfrag)
+               kunmap_skb_frag(i->data);
+       /* Bug trap for callers */
+       i->data = NULL;
+}
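+
+/*
+ * Usage sketch (hypothetical, for illustration): walk every byte of a
+ * possibly non-linear skb with the iterator above. A loop that is
+ * abandoned early must call skb_iter_abort() to release the mapping.
+ */
+static inline unsigned int skb_iter_example(const struct sk_buff *skb)
+{
+       struct skb_iter iter;
+       unsigned int csum = 0;
+
+       skb_iter_first(skb, &iter);
+       do {
+               csum = csum_partial(iter.data, iter.len, csum);
+       } while (skb_iter_next(skb, &iter));
+       return csum;
+}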
+
+/* Checksum skb data. */
+
+unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+                         int len, unsigned int csum)
+{
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
+       int pos = 0;
+
+       /* Checksum header. */
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               csum = csum_partial(skb->data + offset, copy, csum);
+               if ((len -= copy) == 0)
+                       return csum;
+               offset += copy;
+               pos     = copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       unsigned int csum2;
+                       u8 *vaddr;
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       vaddr = kmap_skb_frag(frag);
+                       csum2 = csum_partial(vaddr + frag->page_offset +
+                                            offset - start, copy, 0);
+                       kunmap_skb_frag(vaddr);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if (!(len -= copy))
+                               return csum;
+                       offset += copy;
+                       pos    += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               unsigned int csum2;
+                               if (copy > len)
+                                       copy = len;
+                               csum2 = skb_checksum(list, offset - start,
+                                                    copy, 0);
+                               csum = csum_block_add(csum, csum2, pos);
+                               if ((len -= copy) == 0)
+                                       return csum;
+                               offset += copy;
+                               pos    += copy;
+                       }
+                       start = end;
+               }
+       }
+       if (len)
+               BUG();
+
+       return csum;
+}
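+
+/*
+ * Usage sketch (hypothetical, for illustration): a software fallback
+ * for a packet whose protocol checksum was left blank by a checksum
+ * offload path, assuming skb->h.raw already points at the transport
+ * header.
+ */
+static inline unsigned int csum_fallback_example(const struct sk_buff *skb)
+{
+       unsigned int off = skb->h.raw - skb->data;
+
+       return csum_fold(skb_checksum(skb, off, skb->len - off, 0));
+}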
+
+/* Both of the above in one bottle. */
+
+unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
+                                   u8 *to, int len, unsigned int csum)
+{
+       int start = skb_headlen(skb);
+       int i, copy = start - offset;
+       int pos = 0;
+
+       /* Copy header. */
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               csum = csum_partial_copy_nocheck(skb->data + offset, to,
+                                                copy, csum);
+               if ((len -= copy) == 0)
+                       return csum;
+               offset += copy;
+               to     += copy;
+               pos     = copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               int end;
+
+               BUG_TRAP(start <= offset + len);
+
+               end = start + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       unsigned int csum2;
+                       u8 *vaddr;
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       vaddr = kmap_skb_frag(frag);
+                       csum2 = csum_partial_copy_nocheck(vaddr +
+                                                         frag->page_offset +
+                                                         offset - start, to,
+                                                         copy, 0);
+                       kunmap_skb_frag(vaddr);
+                       csum = csum_block_add(csum, csum2, pos);
+                       if (!(len -= copy))
+                               return csum;
+                       offset += copy;
+                       to     += copy;
+                       pos    += copy;
+               }
+               start = end;
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       unsigned int csum2;
+                       int end;
+
+                       BUG_TRAP(start <= offset + len);
+
+                       end = start + list->len;
+                       if ((copy = end - offset) > 0) {
+                               if (copy > len)
+                                       copy = len;
+                               csum2 = skb_copy_and_csum_bits(list,
+                                                              offset - start,
+                                                              to, copy, 0);
+                               csum = csum_block_add(csum, csum2, pos);
+                               if ((len -= copy) == 0)
+                                       return csum;
+                               offset += copy;
+                               to     += copy;
+                               pos    += copy;
+                       }
+                       start = end;
+               }
+       }
+       if (len)
+               BUG();
+       return csum;
+}
+
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
+{
+       unsigned int csum;
+       long csstart;
+
+       if (skb->ip_summed == CHECKSUM_HW)
+               csstart = skb->h.raw - skb->data;
+       else
+               csstart = skb_headlen(skb);
+
+       if (csstart > skb_headlen(skb))
+               BUG();
+
+       memcpy(to, skb->data, csstart);
+
+       csum = 0;
+       if (csstart != skb->len)
+               csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
+                                             skb->len - csstart, 0);
+
+       if (skb->ip_summed == CHECKSUM_HW) {
+               long csstuff = csstart + skb->csum;
+
+               *((unsigned short *)(to + csstuff)) = csum_fold(csum);
+       }
+}
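+
+/*
+ * Usage sketch (hypothetical, for illustration): a driver whose hardware
+ * wants the frame in one contiguous buffer can copy it and complete a
+ * CHECKSUM_HW checksum in a single pass. tx_buf is assumed to hold at
+ * least skb->len bytes.
+ */
+static inline void copy_and_csum_example(const struct sk_buff *skb, u8 *tx_buf)
+{
+       skb_copy_and_csum_dev(skb, tx_buf);     /* copies and folds csum */
+}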
+
+/**
+ *     skb_dequeue - remove from the head of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the head of the list. The list lock is taken so the function
+ *     may be used safely with other locking list functions. The head item is
+ *     returned or %NULL if the list is empty.
+ */
+
+struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+       unsigned long flags;
+       struct sk_buff *result;
+
+       spin_lock_irqsave(&list->lock, flags);
+       result = __skb_dequeue(list);
+       spin_unlock_irqrestore(&list->lock, flags);
+       return result;
+}
+
+/**
+ *     skb_dequeue_tail - remove from the tail of the queue
+ *     @list: list to dequeue from
+ *
+ *     Remove the tail of the list. The list lock is taken so the function
+ *     may be used safely with other locking list functions. The tail item is
+ *     returned or %NULL if the list is empty.
+ */
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+{
+       unsigned long flags;
+       struct sk_buff *result;
+
+       spin_lock_irqsave(&list->lock, flags);
+       result = __skb_dequeue_tail(list);
+       spin_unlock_irqrestore(&list->lock, flags);
+       return result;
+}
+
+/**
+ *     skb_queue_purge - empty a list
+ *     @list: list to empty
+ *
+ *     Delete all buffers on an &sk_buff list. Each buffer is removed from
+ *     the list and one reference dropped. This function takes the list
+ *     lock and is atomic with respect to other list locking functions.
+ */
+void skb_queue_purge(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+       while ((skb = skb_dequeue(list)) != NULL)
+               kfree_skb(skb);
+}
+
+/**
+ *     skb_queue_head - queue a buffer at the list head
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the start of the list. This function takes the
+ *     list lock and can be used safely with other locking &sk_buff
+ *     functions.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_queue_head(list, newsk);
+       spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ *     skb_queue_tail - queue a buffer at the list tail
+ *     @list: list to use
+ *     @newsk: buffer to queue
+ *
+ *     Queue a buffer at the tail of the list. This function takes the
+ *     list lock and can be used safely with other locking &sk_buff
+ *     functions.
+ *
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_queue_tail(list, newsk);
+       spin_unlock_irqrestore(&list->lock, flags);
+}
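+
+/*
+ * Usage sketch (hypothetical, for illustration): the locked enqueue and
+ * dequeue operations above pair naturally across contexts, e.g. an
+ * interrupt handler producing and a softirq draining.
+ */
+static inline void queue_drain_example(struct sk_buff_head *q)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(q)) != NULL)
+               kfree_skb(skb);         /* or hand off for processing */
+}
+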
+/**
+ *     skb_unlink      -       remove a buffer from a list
+ *     @skb: buffer to remove
+ *
+ *     Remove the buffer from whichever list it currently sits on. The list
+ *     lock is taken and this function is atomic with respect to other
+ *     list-locked calls.
+ *
+ *     Works even without knowing the list it is sitting on, which can be
+ *     handy at times. It also means that THE LIST MUST EXIST when you
+ *     unlink. Thus a list must have its contents unlinked before it is
+ *     destroyed.
+ */
+void skb_unlink(struct sk_buff *skb)
+{
+       struct sk_buff_head *list = skb->list;
+
+       if (list) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&list->lock, flags);
+               if (skb->list == list)
+                       __skb_unlink(skb, skb->list);
+               spin_unlock_irqrestore(&list->lock, flags);
+       }
+}
+
+
+/**
+ *     skb_append      -       append a buffer
+ *     @old: buffer to insert after
+ *     @newsk: buffer to insert
+ *
+ *     Place a packet after a given packet in a list. The list locks are taken
+ *     and this function is atomic with respect to other list locked calls.
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+
+void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&old->list->lock, flags);
+       __skb_append(old, newsk);
+       spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+
+/**
+ *     skb_insert      -       insert a buffer
+ *     @old: buffer to insert before
+ *     @newsk: buffer to insert
+ *
+ *     Place a packet before a given packet in a list. The list locks are
+ *     taken and this function is atomic with respect to other list-locked
+ *     calls.
+ *     A buffer cannot be placed on two lists at the same time.
+ */
+
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&old->list->lock, flags);
+       __skb_insert(newsk, old->prev, old, old->list);
+       spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+#if 0
+/*
+ *     Tune the memory allocator for a new MTU size.
+ */
+void skb_add_mtu(int mtu)
+{
+       /* Must match allocation in alloc_skb */
+       mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
+
+       kmem_add_cache_size(mtu);
+}
+#endif
+
+static inline void skb_split_inside_header(struct sk_buff *skb,
+                                          struct sk_buff* skb1,
+                                          const u32 len, const int pos)
+{
+       int i;
+
+       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
+
+       /* And move data appendix as is. */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+               skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
+
+       skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
+       skb_shinfo(skb)->nr_frags  = 0;
+       skb1->data_len             = skb->data_len;
+       skb1->len                  += skb1->data_len;
+       skb->data_len              = 0;
+       skb->len                   = len;
+       skb->tail                  = skb->data + len;
+}
+
+static inline void skb_split_no_header(struct sk_buff *skb,
+                                      struct sk_buff* skb1,
+                                      const u32 len, int pos)
+{
+       int i, k = 0;
+       const int nfrags = skb_shinfo(skb)->nr_frags;
+
+       skb_shinfo(skb)->nr_frags = 0;
+       skb1->len                 = skb1->data_len = skb->len - len;
+       skb->len                  = len;
+       skb->data_len             = len - pos;
+
+       for (i = 0; i < nfrags; i++) {
+               int size = skb_shinfo(skb)->frags[i].size;
+
+               if (pos + size > len) {
+                       skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
+
+                       if (pos < len) {
+                               /* Split frag.
+                                * We have two variants in this case:
+                                * 1. Move the whole frag to the second
+                                *    part, if it is possible. E.g.
+                                *    this approach is mandatory for TUX,
+                                *    where splitting is expensive.
+                                * 2. Split accurately. We do the latter.
+                                */
+                               get_page(skb_shinfo(skb)->frags[i].page);
+                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_shinfo(skb1)->frags[0].size -= len - pos;
+                               skb_shinfo(skb)->frags[i].size  = len - pos;
+                               skb_shinfo(skb)->nr_frags++;
+                       }
+                       k++;
+               } else
+                       skb_shinfo(skb)->nr_frags++;
+               pos += size;
+       }
+       skb_shinfo(skb1)->nr_frags = k;
+}
+
+/**
+ *     skb_split - split a fragmented skb into two parts at length len
+ *     @skb: the buffer to split; it retains the first @len bytes
+ *     @skb1: the buffer receiving the remaining bytes
+ *     @len: length of the first part
+ */
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
+{
+       int pos = skb_headlen(skb);
+
+       if (len < pos)  /* Split line is inside header. */
+               skb_split_inside_header(skb, skb1, len, pos);
+       else            /* Second chunk has no header, nothing to copy. */
+               skb_split_no_header(skb, skb1, len, pos);
+}
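+
+/*
+ * Usage sketch (hypothetical, for illustration): carve the first mss
+ * bytes off a buffer, TCP-segmentation style, assuming mss < skb->len.
+ * The tail skb only needs head space for whatever part of the linear
+ * header moves across.
+ */
+static inline struct sk_buff *skb_split_example(struct sk_buff *skb, u32 mss)
+{
+       struct sk_buff *tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
+
+       if (!tail)
+               return NULL;
+       skb_split(skb, tail, mss);      /* skb keeps the first mss bytes */
+       return tail;
+}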
+
+void __init skb_init(void)
+{
+       skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+                                             sizeof(struct sk_buff),
+                                             0,
+                                             SLAB_HWCACHE_ALIGN,
+                                             NULL, NULL);
+       if (!skbuff_head_cache)
+               panic("cannot create skbuff cache");
+}
+
+EXPORT_SYMBOL(___pskb_trim);
+EXPORT_SYMBOL(__kfree_skb);
+EXPORT_SYMBOL(__pskb_pull_tail);
+EXPORT_SYMBOL(alloc_skb);
+EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(pskb_expand_head);
+EXPORT_SYMBOL(skb_checksum);
+EXPORT_SYMBOL(skb_clone);
+EXPORT_SYMBOL(skb_clone_fraglist);
+EXPORT_SYMBOL(skb_copy);
+EXPORT_SYMBOL(skb_copy_and_csum_bits);
+EXPORT_SYMBOL(skb_copy_and_csum_dev);
+EXPORT_SYMBOL(skb_copy_bits);
+EXPORT_SYMBOL(skb_copy_expand);
+EXPORT_SYMBOL(skb_over_panic);
+EXPORT_SYMBOL(skb_pad);
+EXPORT_SYMBOL(skb_realloc_headroom);
+EXPORT_SYMBOL(skb_under_panic);
+EXPORT_SYMBOL(skb_dequeue);
+EXPORT_SYMBOL(skb_dequeue_tail);
+EXPORT_SYMBOL(skb_insert);
+EXPORT_SYMBOL(skb_queue_purge);
+EXPORT_SYMBOL(skb_queue_head);
+EXPORT_SYMBOL(skb_queue_tail);
+EXPORT_SYMBOL(skb_unlink);
+EXPORT_SYMBOL(skb_append);
+EXPORT_SYMBOL(skb_split);
+EXPORT_SYMBOL(skb_iter_first);
+EXPORT_SYMBOL(skb_iter_next);
+EXPORT_SYMBOL(skb_iter_abort);
index 839cc8dade5889caed9ed1fc95823f2aa981c2bf..87a5ce0a3254c35e7a0515bef766360d88323335 100644 (file)
@@ -12,7 +12,8 @@
 typedef struct {
     memory_t addr;   /*  0: Machine address of packet.  */
     MEMORY_PADDING;
-    u16      id;     /*  8: Echoed in response message. */
+    u16      csum_blank:1; /* Proto csum field blank?   */
+    u16      id:15;  /*  8: Echoed in response message. */
     u16      size;   /* 10: Packet size in bytes.       */
 } PACKED netif_tx_request_t; /* 12 bytes */
 
@@ -29,7 +30,8 @@ typedef struct {
 typedef struct {
     memory_t addr;   /*  0: Machine address of packet.              */
     MEMORY_PADDING;
-    u16      id;     /*  8:  */
+    u16      csum_valid:1; /* Protocol checksum is validated?       */
+    u16      id:15;  /*  8:  */
     s16      status; /* 10: -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
 } PACKED netif_rx_response_t; /* 12 bytes */
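
Usage sketch (hypothetical, for illustration): a transmitting frontend that
leaves the TCP/UDP checksum field blank would flag this in the request it
queues, for example:

    tx->addr       = virt_to_machine(skb->data);
    tx->size       = skb->len;
    tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);

while a receiving frontend would consult csum_valid in the response before
falling back to a software checksum. Everything here other than the
csum_blank and csum_valid fields themselves is an assumption about the
surrounding ring code.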